Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-09 01:30:18 +00:00)
Compare commits: anoa/valid...v1.138.3 (9 commits)
Commits in this comparison (author and date columns did not survive the capture):

- 527e831b61
- 7069636c2d
- dde1e012a4
- 533d5e0a7a
- 6c292dc4ee
- 120389b077
- 71b34b3a07
- 0fbf296c99
- 0c8594c9a8
CHANGES.md (31 lines changed)
@@ -1,3 +1,34 @@
+# Synapse 1.138.3 (2025-10-07)
+
+## Security Fixes
+
+- Fix [CVE-2025-61672](https://www.cve.org/CVERecord?id=CVE-2025-61672) / [GHSA-fh66-fcv5-jjfr](https://github.com/element-hq/synapse/security/advisories/GHSA-fh66-fcv5-jjfr). Lack of validation for device keys in Synapse before 1.139.1 allows an attacker registered on the victim homeserver to degrade federation functionality, unpredictably breaking outbound federation to other homeservers. ([\#17097](https://github.com/element-hq/synapse/issues/17097))
+
+## Deprecations and Removals
+
+- Drop support for unstable field names from the long-accepted [MSC2732](https://github.com/matrix-org/matrix-spec-proposals/pull/2732) (Olm fallback keys) proposal. This change allows unit tests to pass following the security patch above. ([\#18996](https://github.com/element-hq/synapse/issues/18996))
+
+
+# Synapse 1.138.2 (2025-09-24)
+
+## Internal Changes
+
+- Drop support for Ubuntu 24.10 Oracular Oriole, and add support for Ubuntu 25.04 Plucky Puffin. ([\#18962](https://github.com/element-hq/synapse/issues/18962))
+
+
+# Synapse 1.138.1 (2025-09-24)
+
+## Bugfixes
+
+- Fix a performance regression related to the experimental Delayed Events ([MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140)) feature. ([\#18926](https://github.com/element-hq/synapse/issues/18926))
+
+
 # Synapse 1.138.0 (2025-09-09)
 
 No significant changes since 1.138.0rc1.
debian/changelog (vendored, 18 lines changed)
@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.138.3) stable; urgency=medium
+
+  * New Synapse release 1.138.3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Oct 2025 12:54:18 +0100
+
+matrix-synapse-py3 (1.138.2) stable; urgency=medium
+
+  * New Synapse release 1.138.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 24 Sep 2025 12:26:16 +0100
+
+matrix-synapse-py3 (1.138.1) stable; urgency=medium
+
+  * New Synapse release 1.138.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 24 Sep 2025 11:32:38 +0100
+
 matrix-synapse-py3 (1.138.0) stable; urgency=medium
 
   * New Synapse release 1.138.0.
docs/upgrade.md

@@ -117,6 +117,16 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).
 
+# Upgrading to v1.138.1
+
+## Drop support for Ubuntu 24.10 Oracular Oriole, and add support for Ubuntu 25.04 Plucky Puffin
+
+Ubuntu 24.10 Oracular Oriole [has been end-of-life since 10 Jul
+2025](https://endoflife.date/ubuntu). This release drops support for Ubuntu
+24.10, and in its place adds support for Ubuntu 25.04 Plucky Puffin.
+
+This notice also applies to the v1.139.0 release.
+
 # Upgrading to v1.136.0
 
 ## Deprecate `run_as_background_process` exported as part of the module API interface in favor of `ModuleApi.run_as_background_process`
pyproject.toml

@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.138.0"
+version = "1.138.3"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
scripts-dev/build_debian_packages.py

@@ -32,7 +32,7 @@ DISTS = (
     "debian:sid",  # (rolling distro, no EOL)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:noble",  # 24.04 LTS (EOL 2029-06)
-    "ubuntu:oracular",  # 24.10 (EOL 2025-07)
+    "ubuntu:plucky",  # 25.04 (EOL 2026-01)
     "debian:trixie",  # (EOL not specified yet)
 )
synapse/handlers/delayed_events.py

@@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Set, Tuple
 from twisted.internet.interfaces import IDelayedCall
 
 from synapse.api.constants import EventTypes
-from synapse.api.errors import ShadowBanError
+from synapse.api.errors import ShadowBanError, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
 from synapse.logging.opentracing import set_tag

@@ -45,6 +45,7 @@ from synapse.types import (
 )
 from synapse.util.events import generate_fake_event_id
 from synapse.util.metrics import Measure
+from synapse.util.sentinel import Sentinel
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -146,10 +147,37 @@ class DelayedEventsHandler:
         )
 
     async def _unsafe_process_new_event(self) -> None:
+        # We purposefully fetch the current max room stream ordering before
+        # doing anything else, as it could increment during processing of state
+        # deltas. We want to avoid updating `delayed_events_stream_pos` past
+        # the stream ordering of the state deltas we've processed. Otherwise
+        # we'll leave gaps in our processing.
+        room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+
+        # Check that there are actually any delayed events to process. If not, bail early.
+        delayed_events_count = await self._store.get_count_of_delayed_events()
+        if delayed_events_count == 0:
+            # There are no delayed events to process. Update the
+            # `delayed_events_stream_pos` to the latest `events` stream pos and
+            # exit early.
+            self._event_pos = room_max_stream_ordering
+
+            logger.debug(
+                "No delayed events to process. Updating `delayed_events_stream_pos` to max stream ordering (%s)",
+                room_max_stream_ordering,
+            )
+
+            await self._store.update_delayed_events_stream_pos(room_max_stream_ordering)
+
+            event_processing_positions.labels(
+                name="delayed_events", **{SERVER_NAME_LABEL: self.server_name}
+            ).set(room_max_stream_ordering)
+
+            return
+
         # If self._event_pos is None then it means we haven't fetched it from the DB yet
         if self._event_pos is None:
             self._event_pos = await self._store.get_delayed_events_stream_pos()
-            room_max_stream_ordering = self._store.get_room_max_stream_ordering()
             if self._event_pos > room_max_stream_ordering:
                 # apparently, we've processed more events than exist in the database!
                 # this can happen if events are removed with history purge or similar.
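A note on the ordering above: the handler snapshots the max stream ordering before it checks whether any delayed events exist, so events persisted mid-check can never be skipped when the stream position is fast-forwarded. A minimal runnable sketch of that pattern, using invented stand-in names rather than Synapse's real classes:

```python
# Sketch of "snapshot the stream position before checking for work".
# `FakeStore` and all names here are illustrative, not Synapse's API.
import asyncio


class FakeStore:
    def __init__(self) -> None:
        self.max_stream_ordering = 10  # advances whenever an event persists
        self.delayed_event_count = 0
        self.delayed_events_stream_pos = 0


async def process_new_events(store: FakeStore) -> None:
    # Snapshot FIRST: anything persisted after this point is handled by the
    # next run, so fast-forwarding to `snapshot` can never skip a delta.
    snapshot = store.max_stream_ordering
    if store.delayed_event_count == 0:
        store.delayed_events_stream_pos = snapshot  # safe fast-forward
        return
    # ...otherwise, process state deltas from the old position up to `snapshot`.


asyncio.run(process_new_events(FakeStore()))
```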
@@ -167,7 +195,7 @@ class DelayedEventsHandler:
             self._clock, name="delayed_events_delta", server_name=self.server_name
         ):
             room_max_stream_ordering = self._store.get_room_max_stream_ordering()
-            if self._event_pos == room_max_stream_ordering:
+            if self._event_pos >= room_max_stream_ordering:
                 return
 
             logger.debug(
@@ -202,23 +230,81 @@ class DelayedEventsHandler:
         Process current state deltas to cancel other users' pending delayed events
         that target the same state.
         """
+        # Get the senders of each delta's state event (as sender information is
+        # not currently stored in the `current_state_deltas` table).
+        event_id_and_sender_dict = await self._store.get_senders_for_event_ids(
+            [delta.event_id for delta in deltas if delta.event_id is not None]
+        )
+
+        # Note: No need to batch as `get_current_state_deltas` will only ever
+        # return 100 rows at a time.
         for delta in deltas:
+            logger.debug(
+                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            )
+
+            # `delta.event_id` and `delta.sender` can be `None` in a few valid
+            # cases (see the docstring of
+            # `get_current_state_delta_membership_changes_for_user` for details).
             if delta.event_id is None:
-                logger.debug(
-                    "Not handling delta for deleted state: %r %r",
+                # TODO: Differentiate between this being caused by a state reset
+                # which removed a user from a room, or the homeserver
+                # purposefully having left the room. We can do so by checking
+                # whether there are any local memberships still left in the
+                # room. If so, then this is the result of a state reset.
+                #
+                # If it is a state reset, we should avoid cancelling new,
+                # delayed state events due to old state resurfacing. So we
+                # should skip and log a warning in this case.
+                #
+                # If the homeserver has left the room, then we should cancel all
+                # delayed state events intended for this room, as there is no
+                # need to try and send a delayed event into a room we've left.
+                logger.warning(
+                    "Skipping state delta (%r, %r) without corresponding event ID. "
+                    "This can happen if the homeserver has left the room (in which "
+                    "case this can be ignored), or if there has been a state reset "
+                    "which has caused the sender to be kicked out of the room",
                     delta.event_type,
                     delta.state_key,
                 )
                 continue
 
-            logger.debug(
-                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            sender_str = event_id_and_sender_dict.get(
+                delta.event_id, Sentinel.UNSET_SENTINEL
             )
 
-            event = await self._store.get_event(delta.event_id, allow_none=True)
-            if not event:
+            if sender_str is None:
+                # An event exists, but the `sender` field was "null" and Synapse
+                # incorrectly accepted the event. This is not expected.
+                logger.error(
+                    "Skipping state delta with event ID '%s' as 'sender' was None. "
+                    "This is unexpected - please report it as a bug!",
+                    delta.event_id,
+                )
+                continue
+            if sender_str is Sentinel.UNSET_SENTINEL:
+                # We have an event ID, but the event was not found in the
+                # datastore. This can happen if a room, or its history, is
+                # purged. State deltas related to the room are left behind, but
+                # the event no longer exists.
+                #
+                # As we cannot get the sender of this event, we can't calculate
+                # whether to cancel delayed events related to this one. So we skip.
+                logger.debug(
+                    "Skipping state delta with event ID '%s' - the room, or its history, may have been purged",
+                    delta.event_id,
+                )
                 continue
-            sender = UserID.from_string(event.sender)
+
+            try:
+                sender = UserID.from_string(sender_str)
+            except SynapseError as e:
+                logger.error(
+                    "Skipping state delta with Matrix User ID '%s' that failed to parse: %s",
+                    sender_str,
+                    e,
+                )
+                continue
 
             next_send_ts = await self._store.cancel_delayed_state_events(
                 room_id=delta.room_id,
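The performance win in this hunk comes from replacing a per-delta `get_event` round-trip with one batched sender lookup. A runnable toy version of the batched contract, with an in-memory dict standing in for the `events` table (not the real datastore API):

```python
from typing import Dict, List, Optional

# In-memory stand-in for the `events` table: event_id -> sender.
EVENTS: Dict[str, Optional[str]] = {
    "$a": "@alice:example.org",
    "$b": None,  # row exists but `sender` is null (the unexpected case)
    # "$c" is absent entirely, e.g. because its room history was purged.
}


def get_senders_for_event_ids(event_ids: List[str]) -> Dict[str, Optional[str]]:
    # One batched lookup for the whole collection, mirroring a single
    # `SELECT event_id, sender FROM events WHERE event_id IN (...)` query
    # instead of one query per delta.
    return {eid: EVENTS[eid] for eid in event_ids if eid in EVENTS}


print(get_senders_for_event_ids(["$a", "$b", "$c"]))
# -> {'$a': '@alice:example.org', '$b': None}; no entry at all for '$c'
```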
synapse/handlers/e2e_keys.py

@@ -57,7 +57,6 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
-
 ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
 
 
@@ -848,14 +847,22 @@ class E2eKeysHandler:
         """
         time_now = self.clock.time_msec()
 
-        # TODO: Validate the JSON to make sure it has the right keys.
         device_keys = keys.get("device_keys", None)
         if device_keys:
             log_kv(
                 {
                     "message": "Updating device_keys for user.",
                     "user_id": user_id,
                     "device_id": device_id,
                 }
             )
+            await self.upload_device_keys_for_user(
+                user_id=user_id,
+                device_id=device_id,
+                keys={"device_keys": device_keys},
+            )
+        else:
+            log_kv({"message": "Did not update device_keys", "reason": "not a dict"})
 
         one_time_keys = keys.get("one_time_keys", None)
         if one_time_keys:
@@ -873,10 +880,9 @@ class E2eKeysHandler:
             log_kv(
                 {"message": "Did not update one_time_keys", "reason": "no keys given"}
             )
-        fallback_keys = keys.get("fallback_keys") or keys.get(
-            "org.matrix.msc2732.fallback_keys"
-        )
-        if fallback_keys and isinstance(fallback_keys, dict):
+
+        fallback_keys = keys.get("fallback_keys")
+        if fallback_keys:
             log_kv(
                 {
                     "message": "Updating fallback_keys for device.",
@@ -885,8 +891,6 @@ class E2eKeysHandler:
                 }
             )
             await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys)
-        elif fallback_keys:
-            log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"})
         else:
             log_kv(
                 {"message": "Did not update fallback_keys", "reason": "no keys given"}
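With the unstable name dropped, only the stable `fallback_keys` field is read from upload requests. For reference, a request body in the shape the Matrix spec describes; all key material and signatures below are placeholders:

```python
# Placeholder /keys/upload body using the stable `fallback_keys` field; the
# unstable `org.matrix.msc2732.fallback_keys` name is no longer consulted.
body = {
    "fallback_keys": {
        "signed_curve25519:AAAAHg": {
            "key": "base64+public+key",
            "fallback": True,  # marks this signed key as a fallback key
            "signatures": {
                "@alice:example.org": {
                    "ed25519:JLAFKJWSCS": "base64+signature"
                }
            },
        }
    }
}
```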
synapse/handlers/presence.py

@@ -1540,7 +1540,7 @@ class PresenceHandler(BasePresenceHandler):
             self.clock, name="presence_delta", server_name=self.server_name
         ):
             room_max_stream_ordering = self.store.get_room_max_stream_ordering()
-            if self._event_pos == room_max_stream_ordering:
+            if self._event_pos >= room_max_stream_ordering:
                 return
 
             logger.debug(
synapse/handlers/sliding_sync/room_lists.py

@@ -13,7 +13,6 @@
 #
 
 
-import enum
 import logging
 from itertools import chain
 from typing import (

@@ -75,6 +74,7 @@ from synapse.types.handlers.sliding_sync import (
 )
 from synapse.types.state import StateFilter
 from synapse.util import MutableOverlayMapping
+from synapse.util.sentinel import Sentinel
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -83,12 +83,6 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class Sentinel(enum.Enum):
-    # defining a sentinel in this way allows mypy to correctly handle the
-    # type of a dictionary lookup and subsequent type narrowing.
-    UNSET_SENTINEL = object()
-
-
 # Helper definition for the types that we might return. We do this to avoid
 # copying data between types (which can be expensive for many rooms).
 RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync]
synapse/rest/client/keys.py

@@ -23,10 +23,19 @@
 import logging
 import re
 from collections import Counter
-from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
+
+from typing_extensions import Self
 
+from synapse._pydantic_compat import (
+    StrictBool,
+    StrictStr,
+    validator,
+)
 from synapse.api.auth.mas import MasDelegatedAuth
 from synapse.api.errors import (
     Codes,
     InteractiveAuthIncompleteError,
     InvalidAPICallError,
     SynapseError,

@@ -37,11 +46,13 @@ from synapse.http.servlet import (
     parse_integer,
     parse_json_object_from_request,
     parse_string,
+    validate_json_object,
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import log_kv, set_tag
 from synapse.rest.client._base import client_patterns, interactive_auth_handler
 from synapse.types import JsonDict, StreamToken
+from synapse.types.rest import RequestBodyModel
 from synapse.util.cancellation import cancellable
 
 if TYPE_CHECKING:
@@ -59,7 +70,6 @@ class KeyUploadServlet(RestServlet):
         "device_keys": {
             "user_id": "<user_id>",
             "device_id": "<device_id>",
-            "valid_until_ts": <millisecond_timestamp>,
             "algorithms": [
                 "m.olm.curve25519-aes-sha2",
             ]
@@ -111,12 +121,123 @@ class KeyUploadServlet(RestServlet):
         self._clock = hs.get_clock()
         self._store = hs.get_datastores().main
 
+    class KeyUploadRequestBody(RequestBodyModel):
+        """
+        The body of a `POST /_matrix/client/v3/keys/upload` request.
+
+        Based on https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload.
+        """
+
+        class DeviceKeys(RequestBodyModel):
+            algorithms: List[StrictStr]
+            """The encryption algorithms supported by this device."""
+
+            device_id: StrictStr
+            """The ID of the device these keys belong to. Must match the device ID used when logging in."""
+
+            keys: Mapping[StrictStr, StrictStr]
+            """
+            Public identity keys. The names of the properties should be in the
+            format `<algorithm>:<device_id>`. The keys themselves should be encoded as
+            specified by the key algorithm.
+            """
+
+            signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]]
+            """Signatures for the device key object. A map from user ID, to a map from "<algorithm>:<device_id>" to the signature."""
+
+            user_id: StrictStr
+            """The ID of the user the device belongs to. Must match the user ID used when logging in."""
+
+        class KeyObject(RequestBodyModel):
+            key: StrictStr
+            """The key, encoded using unpadded base64."""
+
+            fallback: Optional[StrictBool] = False
+            """Whether this is a fallback key. Only used when handling fallback keys."""
+
+            signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]]
+            """Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself.
+
+            See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details
+            """
+
+        device_keys: Optional[DeviceKeys] = None
+        """Identity keys for the device. May be absent if no new identity keys are required."""
+
+        fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]]
+        """
+        The public key which should be used if the device's one-time keys are
+        exhausted. The fallback key is not deleted once used, but should be
+        replaced when additional one-time keys are being uploaded. The server
+        will notify the client of the fallback key being used through `/sync`.
+
+        There can only be at most one key per algorithm uploaded, and the server
+        will only persist one key per algorithm.
+
+        When uploading a signed key, an additional `fallback: true` key should be
+        included to denote that the key is a fallback key.
+
+        May be absent if a new fallback key is not required.
+        """
+
+        @validator("fallback_keys", pre=True)
+        def validate_fallback_keys(cls: Self, v: Any) -> Any:
+            if v is None:
+                return v
+            if not isinstance(v, dict):
+                raise TypeError("fallback_keys must be a mapping")
+
+            for k in v.keys():
+                if not len(k.split(":")) == 2:
+                    raise SynapseError(
+                        code=HTTPStatus.BAD_REQUEST,
+                        errcode=Codes.BAD_JSON,
+                        msg=f"Invalid fallback_keys key {k!r}. "
+                        'Expected "<algorithm>:<device_id>".',
+                    )
+            return v
+
+        one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None
+        """
+        One-time public keys for "pre-key" messages. The names of the properties
+        should be in the format `<algorithm>:<key_id>`.
+
+        The format of the key is determined by the key algorithm, see:
+        https://spec.matrix.org/v1.16/client-server-api/#key-algorithms.
+        """
+
+        @validator("one_time_keys", pre=True)
+        def validate_one_time_keys(cls: Self, v: Any) -> Any:
+            if v is None:
+                return v
+            if not isinstance(v, dict):
+                raise TypeError("one_time_keys must be a mapping")
+
+            for k, _ in v.items():
+                if not len(k.split(":")) == 2:
+                    raise SynapseError(
+                        code=HTTPStatus.BAD_REQUEST,
+                        errcode=Codes.BAD_JSON,
+                        msg=f"Invalid one_time_keys key {k!r}. "
+                        'Expected "<algorithm>:<device_id>".',
+                    )
+            return v
+
     async def on_POST(
         self, request: SynapseRequest, device_id: Optional[str]
     ) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
         user_id = requester.user.to_string()
 
+        # Parse the request body. Validate separately, as the handler expects a
+        # plain dict, rather than any parsed object.
+        #
+        # Note: It would be nice to work with a parsed object, but the handler
+        # needs to encode portions of the request body as canonical JSON before
+        # storing the result in the DB. There's little point in converting to a
+        # parsed object and then back to a dict.
         body = parse_json_object_from_request(request)
+        validate_json_object(body, self.KeyUploadRequestBody)
 
         if device_id is not None:
             # Providing the device_id should only be done for setting keys
@@ -149,8 +270,31 @@ class KeyUploadServlet(RestServlet):
                 400, "To upload keys, you must pass device_id when authenticating"
             )
 
+        if "device_keys" in body:
+            # Validate the provided `user_id` and `device_id` fields in
+            # `device_keys` match that of the requesting user. We can't do
+            # this directly in the pydantic model as we don't have access
+            # to the requester yet.
+            #
+            # TODO: We could use ValidationInfo when we switch to Pydantic v2.
+            # https://docs.pydantic.dev/latest/concepts/validators/#validation-info
+            if body["device_keys"]["user_id"] != user_id:
+                raise SynapseError(
+                    code=HTTPStatus.BAD_REQUEST,
+                    errcode=Codes.BAD_JSON,
+                    msg="Provided `user_id` in `device_keys` does not match that of the authenticated user",
+                )
+            if body["device_keys"]["device_id"] != device_id:
+                raise SynapseError(
+                    code=HTTPStatus.BAD_REQUEST,
+                    errcode=Codes.BAD_JSON,
+                    msg="Provided `device_id` in `device_keys` does not match that of the authenticated user device",
+                )
+
         result = await self.e2e_keys_handler.upload_keys_for_user(
-            user_id=user_id, device_id=device_id, keys=body
+            user_id=user_id,
+            device_id=device_id,
+            keys=body,
         )
 
         return 200, result
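The two validators above enforce the `<algorithm>:<key_id>` shape of key identifiers before the body ever reaches the handler. A self-contained approximation using plain pydantic v1-style syntax (Synapse's `RequestBodyModel` and `_pydantic_compat` wrappers are replaced with stock pydantic here, so this is a sketch rather than the servlet's exact behaviour):

```python
from typing import Any, Mapping, Optional

from pydantic import BaseModel, StrictStr, validator  # pydantic v1-style API


class KeyUploadBody(BaseModel):
    # Property names must look like "<algorithm>:<key_id>".
    one_time_keys: Optional[Mapping[StrictStr, Any]] = None

    @validator("one_time_keys", pre=True)
    def check_key_ids(cls, v: Any) -> Any:
        if v is None:
            return v
        if not isinstance(v, dict):
            raise TypeError("one_time_keys must be a mapping")
        for k in v:
            if len(k.split(":")) != 2:
                raise ValueError(f"invalid one_time_keys key {k!r}")
        return v


KeyUploadBody(one_time_keys={"signed_curve25519:AAAAHQ": "key+data"})  # passes
# KeyUploadBody(one_time_keys={"no_colon_here": "x"})  # raises ValidationError
```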
synapse/rest/client/sync.py

@@ -363,9 +363,6 @@ class SyncRestServlet(RestServlet):
 
         # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
         # states that this field should always be included, as long as the server supports the feature.
-        response["org.matrix.msc2732.device_unused_fallback_key_types"] = (
-            sync_result.device_unused_fallback_key_types
-        )
         response["device_unused_fallback_key_types"] = (
             sync_result.device_unused_fallback_key_types
         )
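After this change a client sees only the stable field in its `/sync` response. The affected fragment would look like this (value illustrative):

```python
# Illustrative /sync response fragment after the unstable field is removed.
sync_fragment = {
    "device_unused_fallback_key_types": ["signed_curve25519"],
    # "org.matrix.msc2732.device_unused_fallback_key_types" is no longer sent.
}
```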
synapse/storage/controllers/state.py

@@ -682,6 +682,8 @@ class StateStorageController:
             - the stream id which these results go up to
             - list of current_state_delta_stream rows. If it is empty, we are
               up to date.
+
+        A maximum of 100 rows will be returned.
         """
         # FIXME(faster_joins): what do we do here?
         # https://github.com/matrix-org/synapse/issues/13008
synapse/storage/databases/main/delayed_events.py

@@ -182,6 +182,21 @@ class DelayedEventsStore(SQLBaseStore):
             "restart_delayed_event", restart_delayed_event_txn
         )
 
+    async def get_count_of_delayed_events(self) -> int:
+        """Returns the number of pending delayed events in the DB."""
+
+        def _get_count_of_delayed_events(txn: LoggingTransaction) -> int:
+            sql = "SELECT count(*) FROM delayed_events"
+
+            txn.execute(sql)
+            resp = txn.fetchone()
+            return resp[0] if resp is not None else 0
+
+        return await self.db_pool.runInteraction(
+            "get_count_of_delayed_events",
+            _get_count_of_delayed_events,
+        )
+
     async def get_all_delayed_events_for_user(
         self,
         user_localpart: str,
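The count query drives the early-exit path in the handler above. A runnable illustration of the same transaction body, with `sqlite3` standing in for Synapse's database pool (the real method runs inside `runInteraction`):

```python
import sqlite3

# sqlite3 stands in for Synapse's LoggingTransaction/database pool here.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE delayed_events (delay_id TEXT)")
conn.executemany("INSERT INTO delayed_events VALUES (?)", [("a",), ("b",)])

cur = conn.execute("SELECT count(*) FROM delayed_events")
resp = cur.fetchone()
# count(*) always yields exactly one row; the None guard is defensive.
count = resp[0] if resp is not None else 0
print(count)  # 2
```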
synapse/storage/databases/main/events_worker.py

@@ -2135,6 +2135,39 @@ class EventsWorkerStore(SQLBaseStore):
 
         return rows, to_token, True
 
+    async def get_senders_for_event_ids(
+        self, event_ids: Collection[str]
+    ) -> Dict[str, Optional[str]]:
+        """
+        Given a sequence of event IDs, return the sender associated with each.
+
+        Args:
+            event_ids: A collection of event IDs as strings.
+
+        Returns:
+            A dict of event ID -> sender of the event.
+
+            If a given event ID does not exist in the `events` table, then no entry
+            for that event ID will be returned.
+        """
+
+        def _get_senders_for_event_ids(
+            txn: LoggingTransaction,
+        ) -> Dict[str, Optional[str]]:
+            rows = self.db_pool.simple_select_many_txn(
+                txn=txn,
+                table="events",
+                column="event_id",
+                iterable=event_ids,
+                keyvalues={},
+                retcols=["event_id", "sender"],
+            )
+            return dict(rows)
+
+        return await self.db_pool.runInteraction(
+            "get_senders_for_event_ids", _get_senders_for_event_ids
+        )
+
     @cached(max_entries=5000)
     async def get_event_ordering(self, event_id: str, room_id: str) -> Tuple[int, int]:
         res = await self.db_pool.simple_select_one(
synapse/storage/databases/main/state_deltas.py

@@ -94,6 +94,8 @@ class StateDeltasStore(SQLBaseStore):
             - the stream id which these results go up to
             - list of current_state_delta_stream rows. If it is empty, we are
               up to date.
+
+        A maximum of 100 rows will be returned.
         """
         prev_stream_id = int(prev_stream_id)
 
synapse/util/sentinel.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import enum
+
+
+class Sentinel(enum.Enum):
+    # defining a sentinel in this way allows mypy to correctly handle the
+    # type of a dictionary lookup and subsequent type narrowing.
+    UNSET_SENTINEL = object()
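The point of an enum-based sentinel, rather than a bare `object()`, is that mypy can narrow types after a dictionary lookup, as the comment in the new module notes. A minimal self-contained demonstration:

```python
import enum
from typing import Dict, Optional, Union


class Sentinel(enum.Enum):
    # An enum member is its own type, so `is` comparisons narrow unions.
    UNSET_SENTINEL = object()


def describe(senders: Dict[str, Optional[str]], event_id: str) -> str:
    value: Union[Optional[str], Sentinel] = senders.get(
        event_id, Sentinel.UNSET_SENTINEL
    )
    if value is Sentinel.UNSET_SENTINEL:
        return "event not found"  # below here mypy narrows `value` to Optional[str]
    if value is None:
        return "sender was null"
    return value  # `value` is now `str`


print(describe({"$a": "@alice:example.org"}, "$b"))  # event not found
```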
tests/handlers/test_e2e_keys.py

@@ -410,7 +410,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         device_id = "xyz"
         fallback_key = {"alg1:k1": "fallback_key1"}
         fallback_key2 = {"alg1:k2": "fallback_key2"}
-        fallback_key3 = {"alg1:k2": "fallback_key3"}
         otk = {"alg1:k2": "key2"}
 
         # we shouldn't have any unused fallback keys yet
@@ -531,28 +530,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}},
         )
 
-        # using the unstable prefix should also set the fallback key
-        self.get_success(
-            self.handler.upload_keys_for_user(
-                local_user,
-                device_id,
-                {"org.matrix.msc2732.fallback_keys": fallback_key3},
-            )
-        )
-
-        claim_res = self.get_success(
-            self.handler.claim_one_time_keys(
-                {local_user: {device_id: {"alg1": 1}}},
-                self.requester,
-                timeout=None,
-                always_include_fallback_keys=False,
-            )
-        )
-        self.assertEqual(
-            claim_res,
-            {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}},
-        )
-
     def test_fallback_key_bulk(self) -> None:
         """Like test_fallback_key, but claims multiple keys in one handler call."""
         alice = f"@alice:{self.hs.hostname}"
tests/rest/client/test_keys.py

@@ -40,6 +40,127 @@ from tests.unittest import override_config
 from tests.utils import HAS_AUTHLIB
 
 
+class KeyUploadTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        keys.register_servlets,
+        admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+    ]
+
+    def test_upload_keys_fails_on_invalid_structure(self) -> None:
+        """Check that we validate the structure of keys upon upload.
+
+        Regression test for https://github.com/element-hq/synapse/pull/17097
+        """
+        self.register_user("alice", "wonderland")
+        alice_token = self.login("alice", "wonderland")
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/keys/upload",
+            {
+                # Error: device_keys must be a dict
+                "device_keys": ["some", "stuff", "weewoo"]
+            },
+            alice_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"],
+            Codes.BAD_JSON,
+            channel.result,
+        )
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/keys/upload",
+            {
+                # Error: properties of fallback_keys must be in the form `<algorithm>:<device_id>`
+                "fallback_keys": {"invalid_key": "signature_base64"}
+            },
+            alice_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"],
+            Codes.BAD_JSON,
+            channel.result,
+        )
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/keys/upload",
+            {
+                # Same as above, but for one_time_keys
+                "one_time_keys": {"invalid_key": "signature_base64"}
+            },
+            alice_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"],
+            Codes.BAD_JSON,
+            channel.result,
+        )
+
+    def test_upload_keys_fails_on_invalid_user_id_or_device_id(self) -> None:
+        """
+        Validate that the requesting user is uploading their own keys and nobody
+        else's.
+        """
+        device_id = "DEVICE_ID"
+        alice_user_id = self.register_user("alice", "wonderland")
+        alice_token = self.login("alice", "wonderland", device_id=device_id)
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/keys/upload",
+            {
+                "device_keys": {
+                    # Included `user_id` does not match requesting user.
+                    "user_id": "@unknown_user:test",
+                    "device_id": device_id,
+                    "algorithms": ["m.olm.curve25519-aes-sha2"],
+                    "keys": {
+                        f"ed25519:{device_id}": "publickey",
+                    },
+                    "signatures": {},
+                }
+            },
+            alice_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"],
+            Codes.BAD_JSON,
+            channel.result,
+        )
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/keys/upload",
+            {
+                "device_keys": {
+                    "user_id": alice_user_id,
+                    # Included `device_id` does not match requesting user's.
+                    "device_id": "UNKNOWN_DEVICE_ID",
+                    "algorithms": ["m.olm.curve25519-aes-sha2"],
+                    "keys": {
+                        f"ed25519:{device_id}": "publickey",
+                    },
+                    "signatures": {},
+                }
+            },
+            alice_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"],
+            Codes.BAD_JSON,
+            channel.result,
+        )
+
+
 class KeyQueryTestCase(unittest.HomeserverTestCase):
     servlets = [
         keys.register_servlets,
tests/storage/test_events.py

@@ -20,7 +20,7 @@
 #
 
 import logging
-from typing import List, Optional
+from typing import Dict, List, Optional
 
 from twisted.internet.testing import MemoryReactor
 
@@ -39,6 +39,77 @@ from tests.unittest import HomeserverTestCase
 logger = logging.getLogger(__name__)
 
 
+class EventsTestCase(HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+    ) -> None:
+        self._store = self.hs.get_datastores().main
+
+    def test_get_senders_for_event_ids(self) -> None:
+        """Tests the `get_senders_for_event_ids` storage function."""
+
+        users_and_tokens: Dict[str, str] = {}
+        for localpart_suffix in range(10):
+            localpart = f"user_{localpart_suffix}"
+            user_id = self.register_user(localpart, "rabbit")
+            token = self.login(localpart, "rabbit")
+
+            users_and_tokens[user_id] = token
+
+        room_creator_user_id = self.register_user("room_creator", "rabbit")
+        room_creator_token = self.login("room_creator", "rabbit")
+        users_and_tokens[room_creator_user_id] = room_creator_token
+
+        # Create a room and invite some users.
+        room_id = self.helper.create_room_as(
+            room_creator_user_id, tok=room_creator_token
+        )
+        event_ids_to_senders: Dict[str, str] = {}
+        for user_id, token in users_and_tokens.items():
+            if user_id == room_creator_user_id:
+                continue
+
+            self.helper.invite(
+                room=room_id,
+                targ=user_id,
+                tok=room_creator_token,
+            )
+
+            # Have the user accept the invite and join the room.
+            self.helper.join(
+                room=room_id,
+                user=user_id,
+                tok=token,
+            )
+
+            # Have the user send an event.
+            response = self.helper.send_event(
+                room_id=room_id,
+                type="m.room.message",
+                content={
+                    "msgtype": "m.text",
+                    "body": f"hello, I'm {user_id}!",
+                },
+                tok=token,
+            )
+
+            # Record the event ID and sender.
+            event_id = response["event_id"]
+            event_ids_to_senders[event_id] = user_id
+
+        # Check that `get_senders_for_event_ids` returns the correct data.
+        response = self.get_success(
+            self._store.get_senders_for_event_ids(list(event_ids_to_senders.keys()))
+        )
+        self.assert_dict(event_ids_to_senders, response)
+
+
 class ExtremPruneTestCase(HomeserverTestCase):
     servlets = [
         admin.register_servlets,