Compare commits

...

19 Commits

Author SHA1 Message Date
Erik Johnston  ac14e5745c  Index on created ts  2024-08-27 12:00:25 +01:00
Erik Johnston  4a68975459  Don't assert unkown streams, log  2024-08-27 11:55:59 +01:00
Erik Johnston  0e07f657fe  Rename get_and_clear_connection_positions  2024-08-27 11:55:00 +01:00
Erik Johnston  1e5a3a7fb7  Fix errors  2024-08-26 20:15:38 +01:00
Erik Johnston  bae50d31d6  Rename column  2024-08-26 20:12:41 +01:00
Erik Johnston  8ed1c074dd  Add timestamp index  2024-08-26 20:11:57 +01:00
Erik Johnston  68a2a98b5b  Don't bother to use zip  2024-08-26 20:08:50 +01:00
Erik Johnston  948456b159  Add comment about why we ignore zero position  2024-08-26 20:07:07 +01:00
Erik Johnston  7935423ec4  Apply suggestions from code review (Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>)  2024-08-26 20:02:10 +01:00
Erik Johnston  03eac5ae60  Newsfile  2024-08-22 18:04:23 +01:00
Erik Johnston  ed7591cbef  Remove mark_token_seen  2024-08-22 18:04:23 +01:00
Erik Johnston  3838b18d3b  Store state  2024-08-22 18:04:23 +01:00
Erik Johnston  b3d8e2d2bd  Add simple_insert_returning_txn  2024-08-22 18:04:23 +01:00
Erik Johnston  d1ee253bef  Allow making columns AUTOINCREMENT  2024-08-22 18:04:23 +01:00
Erik Johnston  87d53368d7  Newsfile  2024-08-22 18:03:57 +01:00
Erik Johnston  e34d634778  Make PerConnectionState immutable  2024-08-22 18:03:57 +01:00
Erik Johnston  7087c7c3d5  Make RoomSyncConfig immutable  2024-08-22 18:03:57 +01:00
Erik Johnston  5b77f4a67a  Update mypy plugin to handle enums and typevars  2024-08-22 17:04:21 +01:00
Erik Johnston  e2ade85250  Move sliding sync types  2024-08-22 17:04:21 +01:00
19 changed files with 1115 additions and 543 deletions

changelog.d/17599.misc (new file)
View File

@@ -0,0 +1 @@
Store sliding sync per-connection state in the database.

changelog.d/17600.misc (new file)
View File

@@ -0,0 +1 @@
Make the sliding sync `PerConnectionState` class immutable.

View File

@@ -38,6 +38,7 @@ from mypy.types import (
NoneType,
TupleType,
TypeAliasType,
TypeVarType,
UninhabitedType,
UnionType,
)
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
"synapse.synapse_rust.push.FilteredPushRules",
# This is technically not immutable, but close enough.
"signedjson.types.VerifyKey",
"synapse.types.StrCollection",
}
# Immutable containers only if the values are also immutable.
@@ -298,7 +300,7 @@ def is_cacheable(
elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
# Mutable containers are mutable regardless of their underlying type.
- return False, None
+ return False, f"container {rt.type.fullname} is mutable"
elif "attrs" in rt.type.metadata:
# attrs classes are only cacheable iff they are frozen (immutable themselves)
@@ -318,6 +320,9 @@ def is_cacheable(
else:
return False, "non-frozen attrs class"
elif rt.type.is_enum:
# We assume Enum values are immutable
return True, None
else:
# Ensure we fail for unknown types; these generally mean that the
# above code is not complete.
@@ -326,6 +331,18 @@ def is_cacheable(
f"Don't know how to handle {rt.type.fullname} return type instance",
)
elif isinstance(rt, TypeVarType):
# We consider TypeVars immutable if they are bound to a set of immutable
# types.
if rt.values:
for value in rt.values:
ok, note = is_cacheable(value, signature, verbose)
if not ok:
return False, f"TypeVar bound not cacheable {value}"
return True, None
return False, "TypeVar is unbound"
elif isinstance(rt, NoneType):
# None is cacheable.
return True, None
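
For illustration, a minimal sketch (not code from this PR) of the two new return-type shapes the plugin now accepts on cached functions: enum values, and TypeVars constrained to a set of immutable types. An unbound TypeVar is still rejected with "TypeVar is unbound".

from enum import Enum
from typing import TypeVar


class Flag(Enum):
    # Enum members are shared singletons, so the plugin treats them as
    # immutable and therefore safe to cache.
    LIVE = "live"
    PREVIOUSLY = "previously"


# A TypeVar constrained to str/int: cacheable because every constraint is
# itself an immutable, cacheable type.
T = TypeVar("T", str, int)


def get_flag() -> Flag:  # enum return type: now considered cacheable
    return Flag.LIVE


def passthrough(value: T) -> T:  # constrained TypeVar return: cacheable
    return value


print(get_flag(), passthrough("ok"))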

View File

@@ -98,6 +98,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.stream import StreamWorkerStore
@@ -159,6 +160,7 @@ class GenericWorkerStore(
SessionStore,
TaskSchedulerWorkerStore,
ExperimentalFeaturesStore,
SlidingSyncStore,
):
# Properties that multiple storage classes define. Tell mypy what the
# expected type is.

View File

@@ -45,13 +45,6 @@ from synapse.events.utils import parse_stripped_state_event, strip_event
from synapse.handlers.relations import BundledAggregations
from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler
from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore
- from synapse.handlers.sliding_sync.types import (
-     HaveSentRoomFlag,
-     MutablePerConnectionState,
-     PerConnectionState,
-     RoomSyncConfig,
-     StateValues,
- )
from synapse.logging.opentracing import (
SynapseTags,
log_kv,
@@ -83,7 +76,16 @@ from synapse.types import (
StreamToken,
UserID,
)
- from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+ from synapse.types.handlers.sliding_sync import (
+     HaveSentRoomFlag,
+     MutablePerConnectionState,
+     OperationType,
+     PerConnectionState,
+     RoomSyncConfig,
+     SlidingSyncConfig,
+     SlidingSyncResult,
+     StateValues,
+ )
from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client
@@ -206,7 +208,7 @@ class SlidingSyncHandler:
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
self.is_mine_id = hs.is_mine_id
- self.connection_store = SlidingSyncConnectionStore()
+ self.connection_store = SlidingSyncConnectionStore(self.store)
self.extensions = SlidingSyncExtensionHandler(hs)
async def wait_for_sync_for_user(
@@ -326,16 +328,11 @@ class SlidingSyncHandler:
# amount of time (more with round-trips and re-processing) in the end to
# get everything again.
previous_connection_state = (
- await self.connection_store.get_per_connection_state(
+ await self.connection_store.get_and_clear_connection_positions(
sync_config, from_token
)
)
- await self.connection_store.mark_token_seen(
-     sync_config=sync_config,
-     from_token=from_token,
- )
# Get all of the room IDs that the user should be able to see in the sync
# response
has_lists = sync_config.lists is not None and len(sync_config.lists) > 0
@@ -430,15 +427,11 @@ class SlidingSyncHandler:
room_id
)
if existing_room_sync_config is not None:
- existing_room_sync_config.combine_room_sync_config(
+ room_sync_config = existing_room_sync_config.combine_room_sync_config(
room_sync_config
)
else:
- # Make a copy so if we modify it later, it doesn't
- # affect all references.
- relevant_room_map[room_id] = (
-     room_sync_config.deep_copy()
- )
+ relevant_room_map[room_id] = room_sync_config
room_ids_in_list.append(room_id)
@@ -503,11 +496,13 @@ class SlidingSyncHandler:
# and need to fetch more info about.
existing_room_sync_config = relevant_room_map.get(room_id)
if existing_room_sync_config is not None:
- existing_room_sync_config.combine_room_sync_config(
-     room_sync_config
+ room_sync_config = (
+     existing_room_sync_config.combine_room_sync_config(
+         room_sync_config
+     )
)
else:
relevant_room_map[room_id] = room_sync_config
# Fetch room data
rooms: Dict[str, SlidingSyncResult.RoomResult] = {}
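
The hunks above lean on `RoomSyncConfig` now being immutable: `combine_room_sync_config` returns a merged copy instead of mutating the receiver, which is why the `deep_copy()` dance disappears. A rough sketch of the idea, with a simplified stand-in class (a frozenset of (event type, state key) pairs instead of the real required-state map):

from typing import FrozenSet, Tuple

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomSyncConfig:
    timeline_limit: int
    required_state: FrozenSet[Tuple[str, str]]

    def combine_room_sync_config(self, other: "RoomSyncConfig") -> "RoomSyncConfig":
        # Take the largest timeline limit and the union of the required
        # state; a new frozen instance is returned, `self` is untouched.
        return RoomSyncConfig(
            timeline_limit=max(self.timeline_limit, other.timeline_limit),
            required_state=self.required_state | other.required_state,
        )


a = RoomSyncConfig(10, frozenset({("m.room.member", "@alice:example.org")}))
b = RoomSyncConfig(20, frozenset({("m.room.name", "")}))
print(a.combine_room_sync_config(b).timeline_limit)  # 20; `a` is unchanged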

View File

@@ -19,11 +19,6 @@ from typing_extensions import assert_never
from synapse.api.constants import AccountDataTypes
from synapse.handlers.receipts import ReceiptEventSource
- from synapse.handlers.sliding_sync.types import (
-     HaveSentRoomFlag,
-     MutablePerConnectionState,
-     PerConnectionState,
- )
from synapse.logging.opentracing import trace
from synapse.types import (
DeviceListUpdates,
@@ -32,7 +27,14 @@ from synapse.types import (
SlidingSyncStreamToken,
StreamToken,
)
- from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+ from synapse.types.handlers.sliding_sync import (
+     HaveSentRoomFlag,
+     MutablePerConnectionState,
+     OperationType,
+     PerConnectionState,
+     SlidingSyncConfig,
+     SlidingSyncResult,
+ )
if TYPE_CHECKING:
from synapse.server import HomeServer

View File

@@ -13,18 +13,18 @@
#
import logging
- from typing import TYPE_CHECKING, Dict, Optional, Tuple
+ from typing import TYPE_CHECKING, Optional
import attr
from synapse.api.errors import SlidingSyncUnknownPosition
- from synapse.handlers.sliding_sync.types import (
+ from synapse.logging.opentracing import trace
+ from synapse.storage.databases.main import DataStore
+ from synapse.types import SlidingSyncStreamToken
+ from synapse.types.handlers.sliding_sync import (
      MutablePerConnectionState,
      PerConnectionState,
+     SlidingSyncConfig,
  )
- from synapse.logging.opentracing import trace
- from synapse.types import SlidingSyncStreamToken
- from synapse.types.handlers import SlidingSyncConfig
if TYPE_CHECKING:
pass
@@ -61,22 +61,9 @@ class SlidingSyncConnectionStore:
to mapping of room ID to `HaveSentRoom`.
"""
- # `(user_id, conn_id)` -> `connection_position` -> `PerConnectionState`
- _connections: Dict[Tuple[str, str], Dict[int, PerConnectionState]] = attr.Factory(
-     dict
- )
+ store: "DataStore"
- async def is_valid_token(
-     self, sync_config: SlidingSyncConfig, connection_token: int
- ) -> bool:
-     """Return whether the connection token is valid/recognized"""
-     if connection_token == 0:
-         return True
-     conn_key = self._get_connection_key(sync_config)
-     return connection_token in self._connections.get(conn_key, {})
- async def get_per_connection_state(
+ async def get_and_clear_connection_positions(
self,
sync_config: SlidingSyncConfig,
from_token: Optional[SlidingSyncStreamToken],
@@ -86,23 +73,21 @@ class SlidingSyncConnectionStore:
Raises:
SlidingSyncUnknownPosition if the connection_token is unknown
"""
- if from_token is None:
+ # If this is our first request, there is no previous connection state to fetch out of the database
+ if from_token is None or from_token.connection_position == 0:
      return PerConnectionState()
- connection_position = from_token.connection_position
- if connection_position == 0:
-     # Initial sync (request without a `from_token`) starts at `0` so
-     # there is no existing per-connection state
-     return PerConnectionState()
+ conn_id = sync_config.conn_id or ""
- conn_key = self._get_connection_key(sync_config)
- sync_statuses = self._connections.get(conn_key, {})
- connection_state = sync_statuses.get(connection_position)
+ device_id = sync_config.requester.device_id
+ assert device_id is not None
- if connection_state is None:
-     raise SlidingSyncUnknownPosition()
- return connection_state
+ return await self.store.get_and_clear_connection_positions(
+     sync_config.user.to_string(),
+     device_id,
+     conn_id,
+     from_token.connection_position,
+ )
@trace
async def record_new_state(
@@ -116,85 +101,28 @@ class SlidingSyncConnectionStore:
If there are no changes to the state this may return the same token as
the existing per-connection state.
"""
- prev_connection_token = 0
- if from_token is not None:
-     prev_connection_token = from_token.connection_position
  if not new_connection_state.has_updates():
-     return prev_connection_token
+     if from_token is not None:
+         return from_token.connection_position
+     else:
+         return 0
- conn_key = self._get_connection_key(sync_config)
- sync_statuses = self._connections.setdefault(conn_key, {})
+ # A from token with a zero connection position means there was no
+ # previously stored connection state, so we treat a zero the same as
+ # there being no previous position.
+ previous_connection_position = None
+ if from_token is not None and from_token.connection_position != 0:
+     previous_connection_position = from_token.connection_position
- # Generate a new token, removing any existing entries in that token
- # (which can happen if requests get resent).
- new_store_token = prev_connection_token + 1
- sync_statuses.pop(new_store_token, None)
- # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s
- # don't grow forever.
- sync_statuses[new_store_token] = new_connection_state.copy()
- return new_store_token
- @trace
- async def mark_token_seen(
-     self,
-     sync_config: SlidingSyncConfig,
-     from_token: Optional[SlidingSyncStreamToken],
- ) -> None:
-     """We have received a request with the given token, so we can clear out
-     any other tokens associated with the connection.
-     If there is no from token then we have started afresh, and so we delete
-     all tokens associated with the device.
-     """
-     # Clear out any tokens for the connection that doesn't match the one
-     # from the request.
-     conn_key = self._get_connection_key(sync_config)
-     sync_statuses = self._connections.pop(conn_key, {})
-     if from_token is None:
-         return
-     sync_statuses = {
-         connection_token: room_statuses
-         for connection_token, room_statuses in sync_statuses.items()
-         if connection_token == from_token.connection_position
-     }
-     if sync_statuses:
-         self._connections[conn_key] = sync_statuses
- @staticmethod
- def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]:
-     """Return a unique identifier for this connection.
-     The first part is simply the user ID.
-     The second part is generally a combination of device ID and conn_id.
-     However, both these two are optional (e.g. puppet access tokens don't
-     have device IDs), so this handles those edge cases.
-     We use this over the raw `conn_id` to avoid clashes between different
-     clients that use the same `conn_id`. Imagine a user uses a web client
-     that uses `conn_id: main_sync_loop` and an Android client that also has
-     a `conn_id: main_sync_loop`.
-     """
-     user_id = sync_config.user.to_string()
  # Only one sliding sync connection is allowed per given conn_id (empty
  # or not).
  conn_id = sync_config.conn_id or ""
- if sync_config.requester.device_id:
-     return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}")
+ device_id = sync_config.requester.device_id
+ assert device_id is not None
- if sync_config.requester.access_token_id:
-     # If we don't have a device, then the access token ID should be a
-     # stable ID.
-     return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}")
- # If we have neither then its likely an AS or some weird token. Either
- # way we can just fail here.
- raise Exception("Cannot use sliding sync with access token type")
+ return await self.store.persist_per_connection_state(
+     sync_config.user.to_string(),
+     device_id,
+     conn_id,
+     previous_connection_position,
+     new_connection_state,
+ )
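
A toy in-memory model (not Synapse code) of the position protocol these two methods implement against the database: persisting state mints a fresh position, and reading a position prunes every other position for the connection, so an idempotent retry of the same request keeps working while stale positions are invalidated.

from typing import Dict


class ToyPositionStore:
    def __init__(self) -> None:
        self._positions: Dict[int, str] = {}  # connection_position -> state
        self._next_pos = 0

    def persist(self, state: str) -> int:
        # Minting a new position per response means a retried request
        # (same position) can be answered consistently.
        self._next_pos += 1
        self._positions[self._next_pos] = state
        return self._next_pos

    def get_and_clear(self, position: int) -> str:
        if position not in self._positions:
            raise KeyError("unknown position")  # cf. SlidingSyncUnknownPosition
        # The client has used this position, so every other position is dead.
        self._positions = {position: self._positions[position]}
        return self._positions[position]


store = ToyPositionStore()
p1 = store.persist("state@pos1")
p2 = store.persist("state@pos2")
print(store.get_and_clear(p2))  # a retry of the same request still works
print(store.get_and_clear(p2))
try:
    store.get_and_clear(p1)  # ...but p1 was pruned when p2 was used
except KeyError as err:
    print(err)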

View File

@@ -64,6 +64,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.background_updates import BackgroundUpdater
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.types import Connection, Cursor, SQLQueryParameters
from synapse.types import StrCollection
from synapse.util.async_helpers import delay_cancellation
from synapse.util.iterutils import batch_iter
@@ -1095,6 +1096,48 @@ class DatabasePool:
txn.execute(sql, vals)
@staticmethod
def simple_insert_returning_txn(
txn: LoggingTransaction,
table: str,
values: Dict[str, Any],
returning: StrCollection,
) -> Tuple[Any, ...]:
"""Executes a `INSERT INTO... RETURNING...` statement (or equivalent for
SQLite versions that don't support it).
"""
if txn.database_engine.supports_returning:
sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % (
table,
", ".join(k for k in values.keys()),
", ".join("?" for _ in values.keys()),
", ".join(k for k in returning),
)
txn.execute(sql, list(values.values()))
row = txn.fetchone()
assert row is not None
return row
else:
# For old versions of SQLite we do a standard insert and then can
# use `last_insert_rowid` to get at the row we just inserted
DatabasePool.simple_insert_txn(
txn,
table=table,
values=values,
)
txn.execute("SELECT last_insert_rowid()")
row = txn.fetchone()
assert row is not None
(rowid,) = row
row = DatabasePool.simple_select_one_txn(
txn, table=table, keyvalues={"rowid": rowid}, retcols=returning
)
assert row is not None
return row
async def simple_insert_many(
self,
table: str,
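
On SQLite versions without `RETURNING` support, the fallback above does a plain insert and then recovers the new row via `last_insert_rowid()`. A self-contained sketch of that path using the stdlib sqlite3 module (table and column names invented for the demo):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE connections "
    "(connection_key INTEGER PRIMARY KEY AUTOINCREMENT, user_id TEXT)"
)

cur = conn.cursor()
cur.execute("INSERT INTO connections (user_id) VALUES (?)", ("@alice:example.org",))

# Without RETURNING support, first grab the implicit rowid of the insert...
cur.execute("SELECT last_insert_rowid()")
(rowid,) = cur.fetchone()

# ...then re-select the columns the caller asked for, keyed on that rowid.
cur.execute("SELECT connection_key FROM connections WHERE rowid = ?", (rowid,))
print(cur.fetchone())  # (1,)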

View File

@@ -33,6 +33,7 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.types import Cursor
@@ -156,6 +157,7 @@ class DataStore(
LockStore,
SessionStore,
TaskSchedulerWorkerStore,
SlidingSyncStore,
):
def __init__(
self,

View File

@@ -0,0 +1,482 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast
import attr
from synapse.api.errors import SlidingSyncUnknownPosition
from synapse.logging.opentracing import log_kv
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import LoggingTransaction
from synapse.types import MultiWriterStreamToken, RoomStreamToken
from synapse.types.handlers.sliding_sync import (
HaveSentRoom,
HaveSentRoomFlag,
MutablePerConnectionState,
PerConnectionState,
RoomStatusMap,
RoomSyncConfig,
)
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
if TYPE_CHECKING:
from synapse.storage.databases.main import DataStore
logger = logging.getLogger(__name__)
class SlidingSyncStore(SQLBaseStore):
async def persist_per_connection_state(
self,
user_id: str,
device_id: str,
conn_id: str,
previous_connection_position: Optional[int],
per_connection_state: "MutablePerConnectionState",
) -> int:
"""Persist updates to the per-connection state for a sliding sync
connection.
Returns:
The connection position of the newly persisted state.
"""
store = cast("DataStore", self)
return await self.db_pool.runInteraction(
"persist_per_connection_state",
self.persist_per_connection_state_txn,
user_id=user_id,
device_id=device_id,
conn_id=conn_id,
previous_connection_position=previous_connection_position,
per_connection_state=await PerConnectionStateDB.from_state(
per_connection_state, store
),
)
def persist_per_connection_state_txn(
self,
txn: LoggingTransaction,
user_id: str,
device_id: str,
conn_id: str,
previous_connection_position: Optional[int],
per_connection_state: "PerConnectionStateDB",
) -> int:
# First we fetch (or create) the connection key associated with the
# previous connection position.
if previous_connection_position is not None:
# The `previous_connection_position` is a user-supplied value, so we
# need to make sure that the one they supplied is actually theirs.
sql = """
SELECT connection_key
FROM sliding_sync_connection_positions
INNER JOIN sliding_sync_connections USING (connection_key)
WHERE
connection_position = ?
AND user_id = ? AND device_id = ? AND conn_id = ?
"""
txn.execute(
sql, (previous_connection_position, user_id, device_id, conn_id)
)
row = txn.fetchone()
if row is None:
raise SlidingSyncUnknownPosition()
(connection_key,) = row
else:
# We're restarting the connection, so we clear the previous existing data we
# used to track it. We do this here to ensure that if we get lots of
# one-shot requests we don't stack up lots of entries. We have `ON DELETE
# CASCADE` set up on the dependent tables so this will clear out all the
# associated data.
self.db_pool.simple_delete_txn(
txn,
table="sliding_sync_connections",
keyvalues={
"user_id": user_id,
"device_id": device_id,
"conn_id": conn_id,
},
)
(connection_key,) = self.db_pool.simple_insert_returning_txn(
txn,
table="sliding_sync_connections",
values={
"user_id": user_id,
"device_id": device_id,
"conn_id": conn_id,
"created_ts": self._clock.time_msec(),
},
returning=("connection_key",),
)
# Define a new connection position for the updates
(connection_position,) = self.db_pool.simple_insert_returning_txn(
txn,
table="sliding_sync_connection_positions",
values={
"connection_key": connection_key,
"created_ts": self._clock.time_msec(),
},
returning=("connection_position",),
)
# We need to deduplicate the `required_state` JSON. We do this by
# fetching all JSON associated with the connection and comparing that
# with the updates to `required_state`
# Dict from required state json -> required state ID
required_state_to_id: Dict[str, int] = {}
if previous_connection_position is not None:
rows = self.db_pool.simple_select_list_txn(
txn,
table="sliding_sync_connection_required_state",
keyvalues={"connection_key": connection_key},
retcols=("required_state_id", "required_state"),
)
for required_state_id, required_state in rows:
required_state_to_id[required_state] = required_state_id
room_to_state_ids: Dict[str, int] = {}
unique_required_state: Dict[str, List[str]] = {}
for room_id, room_state in per_connection_state.room_configs.items():
serialized_state = json_encoder.encode(
# We store the required state as a sorted list of event type /
# state key tuples.
sorted(
(event_type, state_key)
for event_type, state_keys in room_state.required_state_map.items()
for state_key in state_keys
)
)
existing_state_id = required_state_to_id.get(serialized_state)
if existing_state_id is not None:
room_to_state_ids[room_id] = existing_state_id
else:
unique_required_state.setdefault(serialized_state, []).append(room_id)
# Insert any new `required_state` json we haven't previously seen.
for serialized_required_state, room_ids in unique_required_state.items():
(required_state_id,) = self.db_pool.simple_insert_returning_txn(
txn,
table="sliding_sync_connection_required_state",
values={
"connection_key": connection_key,
"required_state": serialized_required_state,
},
returning=("required_state_id",),
)
for room_id in room_ids:
room_to_state_ids[room_id] = required_state_id
# Copy over state from the previous connection position (we'll overwrite
# these rows with any changes).
if previous_connection_position is not None:
sql = """
INSERT INTO sliding_sync_connection_streams
(connection_position, stream, room_id, room_status, last_token)
SELECT ?, stream, room_id, room_status, last_token
FROM sliding_sync_connection_streams
WHERE connection_position = ?
"""
txn.execute(sql, (connection_position, previous_connection_position))
sql = """
INSERT INTO sliding_sync_connection_room_configs
(connection_position, room_id, timeline_limit, required_state_id)
SELECT ?, room_id, timeline_limit, required_state_id
FROM sliding_sync_connection_room_configs
WHERE connection_position = ?
"""
txn.execute(sql, (connection_position, previous_connection_position))
# We now upsert the changes to the various streams.
key_values = []
value_values = []
for room_id, have_sent_room in per_connection_state.rooms._statuses.items():
key_values.append((connection_position, "rooms", room_id))
value_values.append(
(have_sent_room.status.value, have_sent_room.last_token)
)
for room_id, have_sent_room in per_connection_state.receipts._statuses.items():
key_values.append((connection_position, "receipts", room_id))
value_values.append(
(have_sent_room.status.value, have_sent_room.last_token)
)
self.db_pool.simple_upsert_many_txn(
txn,
table="sliding_sync_connection_streams",
key_names=(
"connection_position",
"stream",
"room_id",
),
key_values=key_values,
value_names=(
"room_status",
"last_token",
),
value_values=value_values,
)
# ... and upsert changes to the room configs.
keys = []
values = []
for room_id, room_config in per_connection_state.room_configs.items():
keys.append((connection_position, room_id))
values.append((room_config.timeline_limit, room_to_state_ids[room_id]))
self.db_pool.simple_upsert_many_txn(
txn,
table="sliding_sync_connection_room_configs",
key_names=(
"connection_position",
"room_id",
),
key_values=keys,
value_names=(
"timeline_limit",
"required_state_id",
),
value_values=values,
)
return connection_position
@cached(iterable=True, max_entries=100000)
async def get_and_clear_connection_positions(
self, user_id: str, device_id: str, conn_id: str, connection_position: int
) -> "PerConnectionState":
"""Get the per-connection state for the given connection position."""
per_connection_state_db = await self.db_pool.runInteraction(
"get_and_clear_connection_positions",
self._get_and_clear_connection_positions_txn,
user_id=user_id,
device_id=device_id,
conn_id=conn_id,
connection_position=connection_position,
)
store = cast("DataStore", self)
return await per_connection_state_db.to_state(store)
def _get_and_clear_connection_positions_txn(
self,
txn: LoggingTransaction,
user_id: str,
device_id: str,
conn_id: str,
connection_position: int,
) -> "PerConnectionStateDB":
# The `previous_connection_position` is a user-supplied value, so we
# need to make sure that the one they supplied is actually theirs.
sql = """
SELECT connection_key
FROM sliding_sync_connection_positions
INNER JOIN sliding_sync_connections USING (connection_key)
WHERE
connection_position = ?
AND user_id = ? AND device_id = ? AND conn_id = ?
"""
txn.execute(sql, (connection_position, user_id, device_id, conn_id))
row = txn.fetchone()
if row is None:
raise SlidingSyncUnknownPosition()
(connection_key,) = row
# Now that we have seen that the client has received and used the connection
# position, we can delete all the other connection positions.
sql = """
DELETE FROM sliding_sync_connection_positions
WHERE connection_key = ? AND connection_position != ?
"""
txn.execute(sql, (connection_key, connection_position))
# Fetch and create a mapping from required state ID to the actual
# required state for the connection.
rows = self.db_pool.simple_select_list_txn(
txn,
table="sliding_sync_connection_required_state",
keyvalues={"connection_key": connection_key},
retcols=(
"required_state_id",
"required_state",
),
)
required_state_map: Dict[int, Dict[str, Set[str]]] = {}
for row in rows:
state = required_state_map[row[0]] = {}
for event_type, state_keys in db_to_json(row[1]):
state[event_type] = set(state_keys)
# Get all the room configs, looking up the required state from the map
# above.
room_config_rows = self.db_pool.simple_select_list_txn(
txn,
table="sliding_sync_connection_room_configs",
keyvalues={"connection_position": connection_position},
retcols=(
"room_id",
"timeline_limit",
"required_state_id",
),
)
room_configs: Dict[str, RoomSyncConfig] = {}
for (
room_id,
timeline_limit,
required_state_id,
) in room_config_rows:
room_configs[room_id] = RoomSyncConfig(
timeline_limit=timeline_limit,
required_state_map=required_state_map[required_state_id],
)
# Now look up the per-room stream data.
rooms: Dict[str, HaveSentRoom[str]] = {}
receipts: Dict[str, HaveSentRoom[str]] = {}
receipt_rows = self.db_pool.simple_select_list_txn(
txn,
table="sliding_sync_connection_streams",
keyvalues={"connection_position": connection_position},
retcols=(
"stream",
"room_id",
"room_status",
"last_token",
),
)
for stream, room_id, room_status, last_token in receipt_rows:
have_sent_room: HaveSentRoom[str] = HaveSentRoom(
status=HaveSentRoomFlag(room_status), last_token=last_token
)
if stream == "rooms":
rooms[room_id] = have_sent_room
elif stream == "receipts":
receipts[room_id] = have_sent_room
else:
# For forwards compatibility we ignore unknown streams, as in the
# future we want to be able to easily add more stream types.
logger.warning("Unrecognized sliding sync stream in DB %r", stream)
return PerConnectionStateDB(
rooms=RoomStatusMap(rooms),
receipts=RoomStatusMap(receipts),
room_configs=room_configs,
)
@attr.s(auto_attribs=True, frozen=True)
class PerConnectionStateDB:
"""An equivalent to `PerConnectionState` that holds data in a format stored
in the DB.
The principal difference is that the tokens for the different streams are
serialized to strings.
When persisting, this *only* contains updates to the state.
"""
rooms: "RoomStatusMap[str]"
receipts: "RoomStatusMap[str]"
room_configs: Mapping[str, "RoomSyncConfig"]
@staticmethod
async def from_state(
per_connection_state: "MutablePerConnectionState", store: "DataStore"
) -> "PerConnectionStateDB":
"""Convert from a standard `PerConnectionState`"""
rooms = {
room_id: HaveSentRoom(
status=status.status,
last_token=(
await status.last_token.to_string(store)
if status.last_token is not None
else None
),
)
for room_id, status in per_connection_state.rooms.get_updates().items()
}
receipts = {
room_id: HaveSentRoom(
status=status.status,
last_token=(
await status.last_token.to_string(store)
if status.last_token is not None
else None
),
)
for room_id, status in per_connection_state.receipts.get_updates().items()
}
log_kv(
{
"rooms": rooms,
"receipts": receipts,
"room_configs": per_connection_state.room_configs.maps[0],
}
)
return PerConnectionStateDB(
rooms=RoomStatusMap(rooms),
receipts=RoomStatusMap(receipts),
room_configs=per_connection_state.room_configs.maps[0],
)
async def to_state(self, store: "DataStore") -> "PerConnectionState":
"""Convert into a standard `PerConnectionState`"""
rooms = {
room_id: HaveSentRoom(
status=status.status,
last_token=(
await RoomStreamToken.parse(store, status.last_token)
if status.last_token is not None
else None
),
)
for room_id, status in self.rooms._statuses.items()
}
receipts = {
room_id: HaveSentRoom(
status=status.status,
last_token=(
await MultiWriterStreamToken.parse(store, status.last_token)
if status.last_token is not None
else None
),
)
for room_id, status in self.receipts._statuses.items()
}
return PerConnectionState(
rooms=RoomStatusMap(rooms),
receipts=RoomStatusMap(receipts),
room_configs=self.room_configs,
)

View File

@@ -28,6 +28,11 @@ if TYPE_CHECKING:
from synapse.storage.database import LoggingDatabaseConnection
# A string that will be replaced with the appropriate auto increment directive
# for the database engine, expands to an auto incrementing integer primary key.
AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$"
class IsolationLevel(IntEnum):
READ_COMMITTED: int = 1
REPEATABLE_READ: int = 2

View File

@@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast
import psycopg2.extensions
from synapse.storage.engines._base import (
AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
BaseDatabaseEngine,
IncorrectDatabaseSetup,
IsolationLevel,
@@ -256,4 +257,10 @@ class PostgresEngine(
executing the script in its own transaction. The script transaction is
left open and it is the responsibility of the caller to commit it.
"""
# Replace auto increment placeholder with the appropriate directive
script = script.replace(
AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
"BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY",
)
cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}")

View File

@@ -25,6 +25,7 @@ import threading
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER
from synapse.storage.types import Cursor
if TYPE_CHECKING:
@@ -168,6 +169,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
> first. No other implicit transaction control is performed; any transaction
> control must be added to sql_script.
"""
# Replace auto increment placeholder with the appropriate directive
script = script.replace(
AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT"
)
# The implementation of `executescript` can be found at
# https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035.
cursor.executescript(f"BEGIN TRANSACTION; {script}")
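
Both engines therefore run the same schema text, each expanding the `$%AUTO_INCREMENT_PRIMARY_KEY%$` placeholder into its own directive first. A runnable sketch of the substitution (only the SQLite branch is executed here; the Postgres string is shown for comparison):

import sqlite3

PLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$"
script = (
    "CREATE TABLE sliding_sync_connections ("
    f"connection_key {PLACEHOLDER}, user_id TEXT NOT NULL);"
)

# Postgres expansion (mirrors PostgresEngine.executescript):
pg_script = script.replace(
    PLACEHOLDER, "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY"
)
print(pg_script)

# SQLite expansion (mirrors Sqlite3Engine.executescript), actually executed:
conn = sqlite3.connect(":memory:")
conn.executescript(
    "BEGIN TRANSACTION; "
    + script.replace(PLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT")
)
conn.execute(
    "INSERT INTO sliding_sync_connections (user_id) VALUES ('@alice:example.org')"
)
print(conn.execute("SELECT * FROM sliding_sync_connections").fetchall())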

View File

@@ -19,7 +19,7 @@
#
#
- SCHEMA_VERSION = 86  # remember to update the list below when updating
+ SCHEMA_VERSION = 87  # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the

View File

@@ -0,0 +1,80 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Table to track active sliding sync connections.
--
-- A new connection will be created for every sliding sync request without a
-- `since` token for a given `conn_id` for a device.
--
-- Once a new connection is created and used we delete all other connections for
-- the `conn_id`.
CREATE TABLE sliding_sync_connections(
connection_key $%AUTO_INCREMENT_PRIMARY_KEY%$,
user_id TEXT NOT NULL,
device_id TEXT NOT NULL,
conn_id TEXT NOT NULL,
created_ts BIGINT NOT NULL
);
CREATE INDEX sliding_sync_connections_idx ON sliding_sync_connections(user_id, device_id, conn_id);
CREATE INDEX sliding_sync_connections_ts_idx ON sliding_sync_connections(created_ts);
-- We track per-connection state by associating changes to the state with
-- connection positions. This ensures that we correctly track state even if we
-- see retries of requests.
--
-- If the client starts a "new" connection (by not specifying a since token),
-- we'll clear out the other connections (to ensure that we don't end up with
-- lots of connection keys).
CREATE TABLE sliding_sync_connection_positions(
connection_position $%AUTO_INCREMENT_PRIMARY_KEY%$,
connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
created_ts BIGINT NOT NULL
);
CREATE INDEX sliding_sync_connection_positions_key ON sliding_sync_connection_positions(connection_key);
CREATE INDEX sliding_sync_connection_positions_ts_idx ON sliding_sync_connection_positions(created_ts);
-- To save space we deduplicate the `required_state` json by assigning IDs to
-- different values.
CREATE TABLE sliding_sync_connection_required_state(
required_state_id $%AUTO_INCREMENT_PRIMARY_KEY%$,
connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
required_state TEXT NOT NULL -- We store this as a json list of event type / state key tuples.
);
CREATE INDEX sliding_sync_connection_required_state_conn_pos ON sliding_sync_connection_required_state(connection_key);
-- Stores the room configs we have seen for rooms in a connection.
CREATE TABLE sliding_sync_connection_room_configs(
connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
room_id TEXT NOT NULL,
timeline_limit BIGINT NOT NULL,
required_state_id BIGINT NOT NULL REFERENCES sliding_sync_connection_required_state(required_state_id)
);
CREATE UNIQUE INDEX sliding_sync_connection_room_configs_idx ON sliding_sync_connection_room_configs(connection_position, room_id);
-- Stores what data we have sent for given streams down given connections.
CREATE TABLE sliding_sync_connection_streams(
connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
stream TEXT NOT NULL, -- e.g. "events" or "receipts"
room_id TEXT NOT NULL,
room_status TEXT NOT NULL, -- "live" or "previously", i.e. the `HaveSentRoomFlag` value
last_token TEXT -- For "previously" the token for the stream we have sent up to.
);
CREATE UNIQUE INDEX sliding_sync_connection_streams_idx ON sliding_sync_connection_streams(connection_position, room_id, stream);
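
The deduplication that `sliding_sync_connection_required_state` enables works on a canonical serialization: the required state map is flattened to a sorted JSON list of (event type, state key) pairs, so two rooms with the same requirements serialize identically and can share one row. A small sketch of that canonicalization (plain dicts stand in for the real `RoomSyncConfig`):

import json

# Two rooms asking for the same state, written in different dict orders.
room_configs = {
    "!a:example.org": {"m.room.member": {"@alice:example.org"}, "m.room.name": {""}},
    "!b:example.org": {"m.room.name": {""}, "m.room.member": {"@alice:example.org"}},
}

required_state_to_id: dict = {}  # serialized required state -> required_state_id
room_to_state_ids: dict = {}

for room_id, required_state_map in room_configs.items():
    # Flatten to a sorted list of (event type, state key) pairs so that
    # equivalent maps always serialize to the same JSON string.
    serialized = json.dumps(
        sorted(
            (event_type, state_key)
            for event_type, state_keys in required_state_map.items()
            for state_key in state_keys
        )
    )
    state_id = required_state_to_id.setdefault(
        serialized, len(required_state_to_id) + 1
    )
    room_to_state_ids[room_id] = state_id

print(room_to_state_ids)  # both rooms share one ID: {'!a:...': 1, '!b:...': 1}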

View File

@@ -17,33 +17,9 @@
# [This file includes modifications made by New Vector Limited]
#
#
- from enum import Enum
- from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple
- import attr
- from typing_extensions import TypedDict
- from synapse._pydantic_compat import HAS_PYDANTIC_V2
- if TYPE_CHECKING or HAS_PYDANTIC_V2:
-     from pydantic.v1 import Extra
- else:
-     from pydantic import Extra
- from synapse.events import EventBase
- from synapse.types import (
-     DeviceListUpdates,
-     JsonDict,
-     JsonMapping,
-     Requester,
-     SlidingSyncStreamToken,
-     StreamToken,
-     UserID,
- )
- from synapse.types.rest.client import SlidingSyncBody
- if TYPE_CHECKING:
-     from synapse.handlers.relations import BundledAggregations
+ from typing import List, Optional, TypedDict
class ShutdownRoomParams(TypedDict):
@@ -101,335 +77,3 @@ class ShutdownRoomResponse(TypedDict):
failed_to_kick_users: List[str]
local_aliases: List[str]
new_room_id: Optional[str]
class SlidingSyncConfig(SlidingSyncBody):
"""
Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
extra fields that we need in the handler
"""
user: UserID
requester: Requester
# Pydantic config
class Config:
# By default, ignore fields that we don't recognise.
extra = Extra.ignore
# By default, don't allow fields to be reassigned after parsing.
allow_mutation = False
# Allow custom types like `UserID` to be used in the model
arbitrary_types_allowed = True
class OperationType(Enum):
"""
Represents the operation types in a Sliding Sync window.
Attributes:
SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
entries in this range.
INSERT: Sets a single entry. If the position is not empty then clients MUST move
entries to the left or the right depending on where the closest empty space is.
DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
places.
INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
offline support, but they should be treated as empty when additional operations
which concern indexes in the range arrive from the server.
"""
SYNC: Final = "SYNC"
INSERT: Final = "INSERT"
DELETE: Final = "DELETE"
INVALIDATE: Final = "INVALIDATE"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingSyncResult:
"""
The Sliding Sync result to be serialized to JSON for a response.
Attributes:
next_pos: The next position token in the sliding window to request (next_batch).
lists: Sliding window API. A map of list key to list results.
rooms: Room subscription API. A map of room ID to room results.
extensions: Extensions API. A map of extension key to extension results.
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomResult:
"""
Attributes:
name: Room name or calculated room name.
avatar: Room avatar
heroes: List of stripped membership events (containing `user_id` and optionally
`avatar_url` and `displayname`) for the users used to calculate the room name.
is_dm: Flag to specify whether the room is a direct-message room (most likely
between two people).
initial: Flag which is set when this is the first time the server is sending this
data on this connection. Clients can use this flag to replace or update
their local state. When there is an update, servers MUST omit this flag
entirely and NOT send "initial":false as this is wasteful on bandwidth. The
absence of this flag means 'false'.
unstable_expanded_timeline: Flag which is set if we're returning more historic
events due to the timeline limit having increased. See "XXX: Odd behavior"
comment in `synapse.handlers.sliding_sync`.
required_state: The current state of the room
timeline: Latest events in the room. The last event is the most recent.
bundled_aggregations: A mapping of event ID to the bundled aggregations for
the timeline events above. This allows clients to show accurate reaction
counts (or edits, threads), even if some of the reaction events were skipped
over in a gappy sync.
stripped_state: Stripped state events (for rooms where the user is
invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
absent on joined/left rooms
prev_batch: A token that can be passed as a start parameter to the
`/rooms/<room_id>/messages` API to retrieve earlier messages.
limited: True if there are more events than `timeline_limit` looking
backwards from the `response.pos` to the `request.pos`.
num_live: The number of timeline events which have just occurred and are not historical.
The last N events are 'live' and should be treated as such. This is mostly
useful to determine whether a given @mention event should make a noise or not.
Clients cannot rely solely on the absence of `initial: true` to determine live
events because if a room not in the sliding window bumps into the window because
of an @mention it will have `initial: true` yet contain a single live event
(with potentially other old events in the timeline).
bump_stamp: The `stream_ordering` of the last event according to the
`bump_event_types`. This helps clients sort more readily without them
needing to pull in a bunch of the timeline to determine the last activity.
`bump_event_types` is a thing because for example, we don't want display
name changes to mark the room as unread and bump it to the top. For
encrypted rooms, we just have to consider any activity as a bump because we
can't see the content and the client has to figure it out for themselves.
joined_count: The number of users with membership of join, including the client's
own user ID. (same as sync v2 `m.joined_member_count`)
invited_count: The number of users with membership of invite. (same as sync v2
`m.invited_member_count`)
notification_count: The total number of unread notifications for this room. (same
as sync v2)
highlight_count: The number of unread notifications for this room with the highlight
flag set. (same as sync v2)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedHero:
user_id: str
display_name: Optional[str]
avatar_url: Optional[str]
name: Optional[str]
avatar: Optional[str]
heroes: Optional[List[StrippedHero]]
is_dm: bool
initial: bool
unstable_expanded_timeline: bool
# Should be empty for invite/knock rooms with `stripped_state`
required_state: List[EventBase]
# Should be empty for invite/knock rooms with `stripped_state`
timeline_events: List[EventBase]
bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
# Optional because it's only relevant to invite/knock rooms
stripped_state: List[JsonDict]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
prev_batch: Optional[StreamToken]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
limited: Optional[bool]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
num_live: Optional[int]
bump_stamp: int
joined_count: int
invited_count: int
notification_count: int
highlight_count: int
def __bool__(self) -> bool:
return (
# If this is the first time the client is seeing the room, we should not filter it out
# under any circumstance.
self.initial
# We need to let the client know if there are any new events
or bool(self.required_state)
or bool(self.timeline_events)
or bool(self.stripped_state)
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingWindowList:
"""
Attributes:
count: The total number of entries in the list. Always present if this list
is.
ops: The sliding list operations to perform.
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Operation:
"""
Attributes:
op: The operation type to perform.
range: Which index positions are affected by this operation. These are
both inclusive.
room_ids: Which room IDs are affected by this operation. These IDs match
up to the positions in the `range`, so the last room ID in this list
matches the 9th index. The room data is held in a separate object.
"""
op: OperationType
range: Tuple[int, int]
room_ids: List[str]
count: int
ops: List[Operation]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Extensions:
"""Responses for extensions
Attributes:
to_device: The to-device extension (MSC3885)
e2ee: The E2EE device extension (MSC3884)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToDeviceExtension:
"""The to-device extension (MSC3885)
Attributes:
next_batch: The to-device stream token the client should use
to get more results
events: A list of to-device messages for the client
"""
next_batch: str
events: Sequence[JsonMapping]
def __bool__(self) -> bool:
return bool(self.events)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeExtension:
"""The E2EE device extension (MSC3884)
Attributes:
device_list_updates: List of user_ids whose devices have changed or left (only
present on incremental syncs).
device_one_time_keys_count: Map from key algorithm to the number of
unclaimed one-time keys currently held on the server for this device. If
an algorithm is unlisted, the count for that algorithm is assumed to be
zero. If this entire parameter is missing, the count for all algorithms
is assumed to be zero.
device_unused_fallback_key_types: List of unused fallback key algorithms
for this device.
"""
# Only present on incremental syncs
device_list_updates: Optional[DeviceListUpdates]
device_one_time_keys_count: Mapping[str, int]
device_unused_fallback_key_types: Sequence[str]
def __bool__(self) -> bool:
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
default_otk = self.device_one_time_keys_count.get("signed_curve25519")
more_than_default_otk = len(self.device_one_time_keys_count) > 1 or (
default_otk is not None and default_otk > 0
)
return bool(
more_than_default_otk
or self.device_list_updates
or self.device_unused_fallback_key_types
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class AccountDataExtension:
"""The Account Data extension (MSC3959)
Attributes:
global_account_data_map: Mapping from `type` to `content` of global account
data events.
account_data_by_room_map: Mapping from room_id to mapping of `type` to
`content` of room account data events.
"""
global_account_data_map: Mapping[str, JsonMapping]
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]]
def __bool__(self) -> bool:
return bool(
self.global_account_data_map or self.account_data_by_room_map
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ReceiptsExtension:
"""The Receipts extension (MSC3960)
Attributes:
room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral
event (type, content)
"""
room_id_to_receipt_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_receipt_map)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TypingExtension:
"""The Typing Notification extension (MSC3961)
Attributes:
room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral
event (type, content)
"""
room_id_to_typing_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_typing_map)
to_device: Optional[ToDeviceExtension] = None
e2ee: Optional[E2eeExtension] = None
account_data: Optional[AccountDataExtension] = None
receipts: Optional[ReceiptsExtension] = None
typing: Optional[TypingExtension] = None
def __bool__(self) -> bool:
return bool(
self.to_device
or self.e2ee
or self.account_data
or self.receipts
or self.typing
)
next_pos: SlidingSyncStreamToken
lists: Dict[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: Extensions
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if the notifier needs to wait for more events when polling for
events.
"""
# We don't include `self.lists` here, as a) `lists` is always non-empty even if
# there are no changes, and b) since we're sorting rooms by `stream_ordering` of
# the latest activity, anything that would cause the order to change would end
# up in `self.rooms` and cause us to send down the change.
return bool(self.rooms or self.extensions)
@staticmethod
def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
"Return a new empty result"
return SlidingSyncResult(
next_pos=next_pos,
lists={},
rooms={},
extensions=SlidingSyncResult.Extensions(),
)

View File

@@ -18,30 +18,382 @@ from collections import ChainMap
from enum import Enum
from typing import (
TYPE_CHECKING,
AbstractSet,
Callable,
Dict,
Final,
Generic,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
cast,
)
import attr
from synapse._pydantic_compat import HAS_PYDANTIC_V2
from synapse.api.constants import EventTypes
from synapse.types import MultiWriterStreamToken, RoomStreamToken, StrCollection, UserID
- from synapse.types.handlers import SlidingSyncConfig
if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1 import Extra
else:
from pydantic import Extra
from synapse.events import EventBase
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
Requester,
SlidingSyncStreamToken,
StreamToken,
)
from synapse.types.rest.client import SlidingSyncBody
if TYPE_CHECKING:
-     pass
+     from synapse.handlers.relations import BundledAggregations
logger = logging.getLogger(__name__)
class SlidingSyncConfig(SlidingSyncBody):
"""
Inherit from `SlidingSyncBody` since we need all of the same fields and add a few
extra fields that we need in the handler
"""
user: UserID
requester: Requester
# Pydantic config
class Config:
# By default, ignore fields that we don't recognise.
extra = Extra.ignore
# By default, don't allow fields to be reassigned after parsing.
allow_mutation = False
# Allow custom types like `UserID` to be used in the model
arbitrary_types_allowed = True
class OperationType(Enum):
"""
Represents the operation types in a Sliding Sync window.
Attributes:
SYNC: Sets a range of entries. Clients SHOULD discard what they previously knew about
entries in this range.
INSERT: Sets a single entry. If the position is not empty then clients MUST move
entries to the left or the right depending on where the closest empty space is.
DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move
places.
INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for
offline support, but they should be treated as empty when additional operations
which concern indexes in the range arrive from the server.
"""
SYNC: Final = "SYNC"
INSERT: Final = "INSERT"
DELETE: Final = "DELETE"
INVALIDATE: Final = "INVALIDATE"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingSyncResult:
"""
The Sliding Sync result to be serialized to JSON for a response.
Attributes:
next_pos: The next position token in the sliding window to request (next_batch).
lists: Sliding window API. A map of list key to list results.
rooms: Room subscription API. A map of room ID to room results.
extensions: Extensions API. A map of extension key to extension results.
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomResult:
"""
Attributes:
name: Room name or calculated room name.
avatar: Room avatar
heroes: List of stripped membership events (containing `user_id` and optionally
`avatar_url` and `displayname`) for the users used to calculate the room name.
is_dm: Flag to specify whether the room is a direct-message room (most likely
between two people).
initial: Flag which is set when this is the first time the server is sending this
data on this connection. Clients can use this flag to replace or update
their local state. When there is an update, servers MUST omit this flag
entirely and NOT send "initial":false as this is wasteful on bandwidth. The
absence of this flag means 'false'.
unstable_expanded_timeline: Flag which is set if we're returning more historic
events due to the timeline limit having increased. See "XXX: Odd behavior"
comment in `synapse.handlers.sliding_sync`.
required_state: The current state of the room
timeline: Latest events in the room. The last event is the most recent.
bundled_aggregations: A mapping of event ID to the bundled aggregations for
the timeline events above. This allows clients to show accurate reaction
counts (or edits, threads), even if some of the reaction events were skipped
over in a gappy sync.
stripped_state: Stripped state events (for rooms where the user is
invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
absent on joined/left rooms
prev_batch: A token that can be passed as a start parameter to the
`/rooms/<room_id>/messages` API to retrieve earlier messages.
limited: True if there are more events than `timeline_limit` looking
backwards from the `response.pos` to the `request.pos`.
num_live: The number of timeline events which have just occurred and are not historical.
The last N events are 'live' and should be treated as such. This is mostly
useful to determine whether a given @mention event should make a noise or not.
Clients cannot rely solely on the absence of `initial: true` to determine live
events because if a room not in the sliding window bumps into the window because
of an @mention it will have `initial: true` yet contain a single live event
(with potentially other old events in the timeline).
bump_stamp: The `stream_ordering` of the last event according to the
`bump_event_types`. This helps clients sort more readily without them
needing to pull in a bunch of the timeline to determine the last activity.
`bump_event_types` is a thing because for example, we don't want display
name changes to mark the room as unread and bump it to the top. For
encrypted rooms, we just have to consider any activity as a bump because we
can't see the content and the client has to figure it out for themselves.
joined_count: The number of users with membership of join, including the client's
own user ID. (same as sync v2 `m.joined_member_count`)
invited_count: The number of users with membership of invite. (same as sync v2
`m.invited_member_count`)
notification_count: The total number of unread notifications for this room. (same
as sync v2)
highlight_count: The number of unread notifications for this room with the highlight
flag set. (same as sync v2)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedHero:
user_id: str
display_name: Optional[str]
avatar_url: Optional[str]
name: Optional[str]
avatar: Optional[str]
heroes: Optional[List[StrippedHero]]
is_dm: bool
initial: bool
unstable_expanded_timeline: bool
# Should be empty for invite/knock rooms with `stripped_state`
required_state: List[EventBase]
# Should be empty for invite/knock rooms with `stripped_state`
timeline_events: List[EventBase]
bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
# Optional because it's only relevant to invite/knock rooms
stripped_state: List[JsonDict]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
prev_batch: Optional[StreamToken]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
limited: Optional[bool]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
num_live: Optional[int]
bump_stamp: int
joined_count: int
invited_count: int
notification_count: int
highlight_count: int
def __bool__(self) -> bool:
return (
# If this is the first time the client is seeing the room, we should not filter it out
# under any circumstance.
self.initial
# We need to let the client know if there are any new events
or bool(self.required_state)
or bool(self.timeline_events)
or bool(self.stripped_state)
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SlidingWindowList:
"""
Attributes:
count: The total number of entries in the list. Always present if this list
is.
ops: The sliding list operations to perform.
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Operation:
"""
Attributes:
op: The operation type to perform.
range: Which index positions are affected by this operation. These are
both inclusive.
room_ids: Which room IDs are affected by this operation. These IDs match
up to the positions in the `range`, so the last room ID in this list
matches the 9th index. The room data is held in a separate object.
"""
op: OperationType
range: Tuple[int, int]
room_ids: List[str]
count: int
ops: List[Operation]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Extensions:
"""Responses for extensions
Attributes:
to_device: The to-device extension (MSC3885)
e2ee: The E2EE device extension (MSC3884)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToDeviceExtension:
"""The to-device extension (MSC3885)
Attributes:
next_batch: The to-device stream token the client should use
to get more results
events: A list of to-device messages for the client
"""
next_batch: str
events: Sequence[JsonMapping]
def __bool__(self) -> bool:
return bool(self.events)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeExtension:
"""The E2EE device extension (MSC3884)
Attributes:
device_list_updates: List of user_ids whose devices have changed or left (only
present on incremental syncs).
device_one_time_keys_count: Map from key algorithm to the number of
unclaimed one-time keys currently held on the server for this device. If
an algorithm is unlisted, the count for that algorithm is assumed to be
zero. If this entire parameter is missing, the count for all algorithms
is assumed to be zero.
device_unused_fallback_key_types: List of unused fallback key algorithms
for this device.
"""
# Only present on incremental syncs
device_list_updates: Optional[DeviceListUpdates]
device_one_time_keys_count: Mapping[str, int]
device_unused_fallback_key_types: Sequence[str]
def __bool__(self) -> bool:
# Note that "signed_curve25519" is always returned in key count responses
# regardless of whether we uploaded any keys for it. This is necessary until
# https://github.com/matrix-org/matrix-doc/issues/3298 is fixed.
#
# Also related:
# https://github.com/element-hq/element-android/issues/3725 and
# https://github.com/matrix-org/synapse/issues/10456
default_otk = self.device_one_time_keys_count.get("signed_curve25519")
more_than_default_otk = len(self.device_one_time_keys_count) > 1 or (
default_otk is not None and default_otk > 0
)
return bool(
more_than_default_otk
or self.device_list_updates
or self.device_unused_fallback_key_types
)
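The same rule, extracted into a runnable stand-alone check; `_has_e2ee_data` is a hypothetical helper mirroring the logic above, not Synapse code:

    from typing import Mapping

    def _has_e2ee_data(one_time_keys_count: Mapping[str, int]) -> bool:
        default_otk = one_time_keys_count.get("signed_curve25519")
        return len(one_time_keys_count) > 1 or (
            default_otk is not None and default_otk > 0
        )

    # A lone zero count for the always-returned algorithm carries no news:
    assert not _has_e2ee_data({"signed_curve25519": 0})
    # Real keys, or a count for any additional algorithm, do:
    assert _has_e2ee_data({"signed_curve25519": 50})
    assert _has_e2ee_data({"signed_curve25519": 0, "some_other_algorithm": 1})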
@attr.s(slots=True, frozen=True, auto_attribs=True)
class AccountDataExtension:
"""The Account Data extension (MSC3959)
Attributes:
global_account_data_map: Mapping from `type` to `content` of global account
data events.
account_data_by_room_map: Mapping from room_id to mapping of `type` to
`content` of room account data events.
"""
global_account_data_map: Mapping[str, JsonMapping]
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]]
def __bool__(self) -> bool:
return bool(
self.global_account_data_map or self.account_data_by_room_map
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ReceiptsExtension:
"""The Receipts extension (MSC3960)
Attributes:
room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral
event (type, content)
"""
room_id_to_receipt_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_receipt_map)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TypingExtension:
"""The Typing Notification extension (MSC3961)
Attributes:
room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral
event (type, content)
"""
room_id_to_typing_map: Mapping[str, JsonMapping]
def __bool__(self) -> bool:
return bool(self.room_id_to_typing_map)
to_device: Optional[ToDeviceExtension] = None
e2ee: Optional[E2eeExtension] = None
account_data: Optional[AccountDataExtension] = None
receipts: Optional[ReceiptsExtension] = None
typing: Optional[TypingExtension] = None
def __bool__(self) -> bool:
return bool(
self.to_device
or self.e2ee
or self.account_data
or self.receipts
or self.typing
)
next_pos: SlidingSyncStreamToken
lists: Dict[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: Extensions
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if the notifier needs to wait for more events when polling for
events.
"""
# We don't include `self.lists` here, as a) `lists` is always non-empty even if
# there are no changes, and b) since we're sorting rooms by `stream_ordering` of
# the latest activity, anything that would cause the order to change would end
# up in `self.rooms` and cause us to send down the change.
return bool(self.rooms or self.extensions)
@staticmethod
def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
"Return a new empty result"
return SlidingSyncResult(
next_pos=next_pos,
lists={},
rooms={},
extensions=SlidingSyncResult.Extensions(),
)
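A short demo of the emptiness contract, assuming `SlidingSyncResult` is importable from `synapse.types.handlers.sliding_sync` per the module move in this changeset:

    from synapse.types.handlers.sliding_sync import SlidingSyncResult

    pos = object()  # stand-in for a real SlidingSyncStreamToken
    empty = SlidingSyncResult.empty(pos)  # type: ignore[arg-type]

    assert not empty              # no rooms and no extensions -> falsey
    assert empty.next_pos is pos  # but the stream position is still carried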
class StateValues:
"""
Understood values of the (type, state_key) tuple in `required_state`.
@@ -60,7 +412,7 @@ class StateValues:
# We can't freeze this class because we want to update it in place with the
# de-duplicated data.
@attr.s(slots=True, auto_attribs=True)
@attr.s(slots=True, auto_attribs=True, frozen=True)
class RoomSyncConfig:
"""
Holds the config for what data we should fetch for a room in the sync response.
@@ -74,7 +426,7 @@ class RoomSyncConfig:
"""
timeline_limit: int
required_state_map: Dict[str, Set[str]]
required_state_map: Mapping[str, AbstractSet[str]]
@classmethod
def from_room_config(
@@ -148,7 +500,7 @@ class RoomSyncConfig:
def deep_copy(self) -> "RoomSyncConfig":
required_state_map: Dict[str, Set[str]] = {
state_type: state_key_set.copy()
state_type: set(state_key_set)
for state_type, state_key_set in self.required_state_map.items()
}
@@ -159,14 +511,20 @@ class RoomSyncConfig:
def combine_room_sync_config(
self, other_room_sync_config: "RoomSyncConfig"
) -> None:
) -> "RoomSyncConfig":
"""
Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the
Combine this `RoomSyncConfig` with another `RoomSyncConfig` and return the
superset union of the two.
"""
timeline_limit = self.timeline_limit
required_state_map = {
event_type: set(state_keys)
for event_type, state_keys in self.required_state_map.items()
}
# Take the highest timeline limit
if self.timeline_limit < other_room_sync_config.timeline_limit:
self.timeline_limit = other_room_sync_config.timeline_limit
timeline_limit = other_room_sync_config.timeline_limit
# Union the required state
for (
@@ -175,14 +533,14 @@ class RoomSyncConfig:
) in other_room_sync_config.required_state_map.items():
# If we already have a wildcard for everything, we don't need to add
# anything else
if StateValues.WILDCARD in self.required_state_map.get(
if StateValues.WILDCARD in required_state_map.get(
StateValues.WILDCARD, set()
):
break
# If we already have a wildcard `state_key` for this `state_type`, we don't need
# to add anything else
if StateValues.WILDCARD in self.required_state_map.get(state_type, set()):
if StateValues.WILDCARD in required_state_map.get(state_type, set()):
continue
# If we're getting wildcards for the `state_type` and `state_key`, that's
@@ -191,16 +549,14 @@ class RoomSyncConfig:
state_type == StateValues.WILDCARD
and StateValues.WILDCARD in state_key_set
):
self.required_state_map = {state_type: {StateValues.WILDCARD}}
required_state_map = {state_type: {StateValues.WILDCARD}}
# We can break, since we don't need to add anything else
break
for state_key in state_key_set:
# If we already have a wildcard for this specific `state_key`, we don't need
# to add it since the wildcard already covers it.
if state_key in self.required_state_map.get(
StateValues.WILDCARD, set()
):
if state_key in required_state_map.get(StateValues.WILDCARD, set()):
continue
# If we're getting a wildcard for the `state_type`, get rid of any other
@@ -211,7 +567,7 @@ class RoomSyncConfig:
# Make a copy so we don't run into an error: `dictionary changed size
# during iteration`, when we remove items
for existing_state_type, existing_state_key_set in list(
self.required_state_map.items()
required_state_map.items()
):
# Make a copy so we don't run into an error: `Set changed size during
# iteration`, when we filter out and remove items
@@ -221,19 +577,21 @@ class RoomSyncConfig:
# If we've left the `set()` empty, remove it from the map
if existing_state_key_set == set():
self.required_state_map.pop(existing_state_type, None)
required_state_map.pop(existing_state_type, None)
# If we're getting a wildcard `state_key`, get rid of any other state_keys
# for this `state_type` since the wildcard will cover it already.
if state_key == StateValues.WILDCARD:
self.required_state_map[state_type] = {state_key}
required_state_map[state_type] = {state_key}
break
# Otherwise, just add it to the set
else:
if self.required_state_map.get(state_type) is None:
self.required_state_map[state_type] = {state_key}
if required_state_map.get(state_type) is None:
required_state_map[state_type] = {state_key}
else:
self.required_state_map[state_type].add(state_key)
required_state_map[state_type].add(state_key)
return RoomSyncConfig(timeline_limit, required_state_map)
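With the method now pure, combining configs can be exercised directly; a hedged example of the wildcard-union semantics (import path assumed from the module move above):

    from synapse.types.handlers.sliding_sync import RoomSyncConfig

    a = RoomSyncConfig(
        timeline_limit=10,
        required_state_map={"m.room.member": {"@alice:test"}},
    )
    b = RoomSyncConfig(
        timeline_limit=20,
        required_state_map={"m.room.member": {"*"}},
    )

    combined = a.combine_room_sync_config(b)
    assert combined.timeline_limit == 20  # highest limit wins
    # The wildcard state_key swallows the specific one it already covers:
    assert combined.required_state_map == {"m.room.member": {"*"}}
    # Inputs are left untouched, since both configs are frozen:
    assert a.required_state_map == {"m.room.member": {"@alice:test"}}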
def must_await_full_state(
self,
@@ -324,7 +682,7 @@ class HaveSentRoomFlag(Enum):
LIVE = "live"
T = TypeVar("T")
T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken)
@attr.s(auto_attribs=True, slots=True, frozen=True)
@@ -383,6 +741,9 @@ class RoomStatusMap(Generic[T]):
return RoomStatusMap(statuses=dict(self._statuses))
def __len__(self) -> int:
return len(self._statuses)
class MutableRoomStatusMap(RoomStatusMap[T]):
"""A mutable version of `RoomStatusMap`"""
@@ -439,7 +800,7 @@ class MutableRoomStatusMap(RoomStatusMap[T]):
self._statuses[room_id] = HaveSentRoom.previously(from_token)
@attr.s(auto_attribs=True)
@attr.s(auto_attribs=True, frozen=True)
class PerConnectionState:
"""The per-connection state. A snapshot of what we've sent down the
connection before.
@@ -484,6 +845,9 @@ class PerConnectionState:
room_configs=dict(self.room_configs),
)
def __len__(self) -> int:
return len(self.rooms) + len(self.receipts) + len(self.room_configs)
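Since neither class defines `__bool__`, `bool()` falls back to these `__len__` implementations, so an empty snapshot is falsey. A hedged sketch of the check this enables (the import path and the attrs factory defaults are assumed; whether Synapse uses it exactly this way is not shown in this hunk):

    from synapse.types.handlers.sliding_sync import PerConnectionState

    state = PerConnectionState()  # assumed factory defaults: all maps empty
    if not state:
        # Nothing has been sent down this connection yet, so there is
        # nothing worth persisting for it.
        pass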
@attr.s(auto_attribs=True)
class MutablePerConnectionState(PerConnectionState):

View File

@@ -18,7 +18,6 @@
#
#
import logging
from copy import deepcopy
from typing import Dict, List, Optional
from unittest.mock import patch
@@ -47,7 +46,7 @@ from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import JsonDict, StreamToken, UserID
from synapse.types.handlers import SlidingSyncConfig
from synapse.types.handlers.sliding_sync import SlidingSyncConfig
from synapse.util import Clock
from tests.replication._base import BaseMultiWorkerStreamTestCase
@@ -566,23 +565,11 @@ class RoomSyncConfigTestCase(TestCase):
"""
Combine A into B and B into A to make sure we get the same result.
"""
# Since we're mutating these in place, make a copy for each of our trials
room_sync_config_a = deepcopy(a)
room_sync_config_b = deepcopy(b)
combined_config = a.combine_room_sync_config(b)
self._assert_room_config_equal(combined_config, expected, "B into A")
# Combine B into A
room_sync_config_a.combine_room_sync_config(room_sync_config_b)
self._assert_room_config_equal(room_sync_config_a, expected, "B into A")
# Since we're mutating these in place, make a copy for each of our trials
room_sync_config_a = deepcopy(a)
room_sync_config_b = deepcopy(b)
# Combine A into B
room_sync_config_b.combine_room_sync_config(room_sync_config_a)
self._assert_room_config_equal(room_sync_config_b, expected, "A into B")
combined_config = b.combine_room_sync_config(a)
self._assert_room_config_equal(combined_config, expected, "A into B")
class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):

View File

@@ -191,8 +191,14 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
}
_, from_token = self.do_sync(sync_body, tok=user1_tok)
# Reset the in-memory cache
self.hs.get_sliding_sync_handler().connection_store._connections.clear()
# Reset the positions
self.get_success(
self.store.db_pool.simple_delete(
table="sliding_sync_connections",
keyvalues={"user_id": user1_id},
desc="clear_sliding_sync_connections_cache",
)
)
# Make the Sliding Sync request
channel = self.make_request(