Mirror of https://github.com/element-hq/synapse.git

Compare commits: dmr/reject...erikj/fixu (6 commits)
Commits: 2e3a83d8d4, 2f497cbe7b, 3fc49255ba, 942c30b16b, 24b590de32, a34a41f135
changelog.d/12623.feature (new file)
@@ -0,0 +1 @@
Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787).
changelog.d/12680.misc (new file)
@@ -0,0 +1 @@
Remove code which updates unused database column `application_services_state.last_txn`.
changelog.d/12721.bugfix (new file)
@@ -0,0 +1 @@
Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper.
changelog.d/12760.feature (new file)
@@ -0,0 +1 @@
Add a config option to allow for auto-tuning of caches.
@@ -784,22 +784,33 @@ caches:
#
#cache_entry_ttl: 30m

# This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
# `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
# a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
# this option, and all three of the options must be specified for this feature to work.
# This flag enables cache autotuning, and is further specified by the
# sub-options `max_cache_memory_usage`, `target_cache_memory_usage`,
# `min_cache_ttl`. These flags work in conjunction with each other to
# maintain a balance between cache memory usage and cache entry
# availability. You must be using jemalloc to utilize this option, and
# all three of the options must be specified for this feature to work.
#
#cache_autotuning:
# This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted.
# They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
# the flag below, or until the `min_cache_ttl` is hit.
# This flag sets a ceiling on much memory the cache can use before
# caches begin to be continuously evicted. They will continue to be
# evicted until the memory usage drops below the
# `target_memory_usage`, set in the flag below, or until the
# `min_cache_ttl` is hit.
#
#max_cache_memory_usage: 1024M

# This flag sets a rough target for the desired memory usage of the caches.
# This flag sets a rough target for the desired memory usage of the
# caches.
#
#target_cache_memory_usage: 758M

# 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
# caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
# from being emptied while Synapse is evicting due to memory.
# 'min_cache_ttl` sets a limit under which newer cache entries are not
# evicted and is only applied when caches are actively being
# evicted/`max_cache_memory_usage` has been exceeded. This is to
# protect hot caches from being emptied while Synapse is evicting due
# to memory.
#
#min_cache_ttl: 5m

# Controls how long the results of a /sync request are cached for after
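The three sub-options interact as described in the comments above. As a rough illustration only (a minimal sketch with hypothetical names, not Synapse's cache implementation, and assuming jemalloc reports the current memory usage), the policy amounts to:

```python
import time

# Hypothetical constants mirroring the sample values above.
MAX_CACHE_MEMORY_USAGE = 1024 * 1024 * 1024    # ceiling, "1024M"
TARGET_CACHE_MEMORY_USAGE = 758 * 1024 * 1024  # target, "758M"
MIN_CACHE_TTL = 5 * 60                         # floor for newer entries, "5m"


def autotune_evict(entries, current_usage):
    """Evict oldest-first once usage crosses the ceiling, stopping when usage
    drops below the target or only entries younger than MIN_CACHE_TTL remain.

    `entries` is a list of (inserted_at, size_bytes, key), oldest first.
    """
    if current_usage < MAX_CACHE_MEMORY_USAGE:
        return []  # below the ceiling: nothing to do
    now = time.time()
    evicted = []
    for inserted_at, size, key in entries:
        if current_usage <= TARGET_CACHE_MEMORY_USAGE:
            break
        if now - inserted_at < MIN_CACHE_TTL:
            break  # protect "hot" (recent) entries from memory-pressure eviction
        evicted.append(key)
        current_usage -= size
    return evicted
```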
@@ -65,6 +65,8 @@ class JoinRules:
    PRIVATE: Final = "private"
    # As defined for MSC3083.
    RESTRICTED: Final = "restricted"
    # As defined for MSC3787.
    KNOCK_RESTRICTED: Final = "knock_restricted"


class RestrictedJoinRuleTypes:
@@ -81,6 +81,9 @@ class RoomVersion:
    msc2716_historical: bool
    # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
    msc2716_redactions: bool
    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
    # knocks and restricted join rules into the same join condition.
    msc3787_knock_restricted_join_rule: bool


class RoomVersions:
@@ -99,6 +102,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V2 = RoomVersion(
        "2",
@@ -115,6 +119,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V3 = RoomVersion(
        "3",
@@ -131,6 +136,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V4 = RoomVersion(
        "4",
@@ -147,6 +153,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V5 = RoomVersion(
        "5",
@@ -163,6 +170,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V6 = RoomVersion(
        "6",
@@ -179,6 +187,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -195,6 +204,7 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V7 = RoomVersion(
        "7",
@@ -211,6 +221,7 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V8 = RoomVersion(
        "8",
@@ -227,6 +238,7 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V9 = RoomVersion(
        "9",
@@ -243,6 +255,7 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC2716v3 = RoomVersion(
        "org.matrix.msc2716v3",
@@ -259,6 +272,24 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=True,
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
    )
@@ -276,6 +307,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        RoomVersions.V8,
        RoomVersions.V9,
        RoomVersions.MSC2716v3,
        RoomVersions.MSC3787,
    )
}
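With the new entry registered, callers can discover the capability from the room-version map. A quick check might look like this (assuming the import path below, which is not shown in this diff):

```python
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

# The unstable room version added above advertises the new join rule.
msc3787 = KNOWN_ROOM_VERSIONS["org.matrix.msc3787"]
assert msc3787.msc3787_knock_restricted_join_rule

# Stable versions in this diff do not, e.g. v9:
assert not KNOWN_ROOM_VERSIONS["9"].msc3787_knock_restricted_join_rule
```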
@@ -176,22 +176,33 @@ class CacheConfig(Config):
#
#cache_entry_ttl: 30m

# This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
# `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
# a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
# this option, and all three of the options must be specified for this feature to work.
# This flag enables cache autotuning, and is further specified by the
# sub-options `max_cache_memory_usage`, `target_cache_memory_usage`,
# `min_cache_ttl`. These flags work in conjunction with each other to
# maintain a balance between cache memory usage and cache entry
# availability. You must be using jemalloc to utilize this option, and
# all three of the options must be specified for this feature to work.
#
#cache_autotuning:
# This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted.
# They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
# the flag below, or until the `min_cache_ttl` is hit.
# This flag sets a ceiling on much memory the cache can use before
# caches begin to be continuously evicted. They will continue to be
# evicted until the memory usage drops below the
# `target_memory_usage`, set in the flag below, or until the
# `min_cache_ttl` is hit.
#
#max_cache_memory_usage: 1024M

# This flag sets a rough target for the desired memory usage of the caches.
# This flag sets a rough target for the desired memory usage of the
# caches.
#
#target_cache_memory_usage: 758M

# 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
# caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
# from being emptied while Synapse is evicting due to memory.
# 'min_cache_ttl` sets a limit under which newer cache entries are not
# evicted and is only applied when caches are actively being
# evicted/`max_cache_memory_usage` has been exceeded. This is to
# protect hot caches from being emptied while Synapse is evicting due
# to memory.
#
#min_cache_ttl: 5m

# Controls how long the results of a /sync request are cached for after
@@ -414,7 +414,12 @@ def _is_membership_change_allowed(
            raise AuthError(403, "You are banned from this room")
        elif join_rule == JoinRules.PUBLIC:
            pass
        elif room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED:
        elif (
            room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
        ) or (
            room_version.msc3787_knock_restricted_join_rule
            and join_rule == JoinRules.KNOCK_RESTRICTED
        ):
            # This is the same as public, but the event must contain a reference
            # to the server who authorised the join. If the event does not contain
            # the proper content it is rejected.
@@ -440,8 +445,13 @@ def _is_membership_change_allowed(
            if authorising_user_level < invite_level:
                raise AuthError(403, "Join event authorised by invalid server.")

        elif join_rule == JoinRules.INVITE or (
            room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
        elif (
            join_rule == JoinRules.INVITE
            or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
            or (
                room_version.msc3787_knock_restricted_join_rule
                and join_rule == JoinRules.KNOCK_RESTRICTED
            )
        ):
            if not caller_in_room and not caller_invited:
                raise AuthError(403, "You are not invited to this room.")
@@ -462,7 +472,10 @@ def _is_membership_change_allowed(
        if user_level < ban_level or user_level <= target_level:
            raise AuthError(403, "You don't have permission to ban")
    elif room_version.msc2403_knocking and Membership.KNOCK == membership:
        if join_rule != JoinRules.KNOCK:
        if join_rule != JoinRules.KNOCK and (
            not room_version.msc3787_knock_restricted_join_rule
            or join_rule != JoinRules.KNOCK_RESTRICTED
        ):
            raise AuthError(403, "You don't have permission to knock")
        elif target_user_id != event.user_id:
            raise AuthError(403, "You cannot knock for other users")
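Taken together, the new branches treat `knock_restricted` as restricted for joins and as knockable for knocks. A standalone distillation of that decision table (hedged: plain strings and a hypothetical helper, not Synapse's `event_auth` API):

```python
def join_rule_permits(membership: str, join_rule: str, flags: dict) -> str:
    """Rough decision table mirroring the conditions in the hunks above."""
    msc3083 = flags.get("msc3083_join_rules", False)
    msc2403 = flags.get("msc2403_knocking", False)
    msc3787 = flags.get("msc3787_knock_restricted_join_rule", False)

    restricted_like = (msc3083 and join_rule == "restricted") or (
        msc3787 and join_rule == "knock_restricted"
    )
    knock_like = (msc2403 and join_rule == "knock") or (
        msc3787 and join_rule == "knock_restricted"
    )

    if membership == "join":
        if join_rule == "public":
            return "allowed"
        if restricted_like:
            return "allowed, if the event names an authorising server"
        if join_rule == "invite" or knock_like:
            return "allowed only when invited or already in the room"
        return "forbidden"
    if membership == "knock" and msc2403:
        return "allowed" if knock_like else "forbidden"
    return "out of scope for this sketch"


# In an MSC3787-capable room version, a knock against a knock_restricted room succeeds,
# and a join goes through the restricted-join path.
flags = {"msc3083_join_rules": True, "msc2403_knocking": True,
         "msc3787_knock_restricted_join_rule": True}
print(join_rule_permits("knock", "knock_restricted", flags))
print(join_rule_permits("join", "knock_restricted", flags))
```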
@@ -241,7 +241,15 @@ class EventAuthHandler:

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)
        return join_rules_event.content.get("join_rule") == JoinRules.RESTRICTED
        content_join_rule = join_rules_event.content.get("join_rule")
        if content_join_rule == JoinRules.RESTRICTED:
            return True

        # also check for MSC3787 behaviour
        if room_version.msc3787_knock_restricted_join_rule:
            return content_join_rule == JoinRules.KNOCK_RESTRICTED

        return False

    async def get_rooms_that_allow_join(
        self, state_ids: StateMap[str]
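The same check in miniature, using a plain dict for the join rules event content (a hypothetical helper, not the handler's method):

```python
def allows_restricted_join(content: dict, msc3787_enabled: bool) -> bool:
    """Does this m.room.join_rules content fall under restricted-join handling?"""
    join_rule = content.get("join_rule")
    if join_rule == "restricted":
        return True
    # MSC3787: knock_restricted rooms are also handled by the restricted-join path.
    return msc3787_enabled and join_rule == "knock_restricted"


print(allows_restricted_join({"join_rule": "knock_restricted"}, msc3787_enabled=True))   # True
print(allows_restricted_join({"join_rule": "knock_restricted"}, msc3787_enabled=False))  # False
```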
@@ -562,8 +562,13 @@ class RoomSummaryHandler:
        if join_rules_event_id:
            join_rules_event = await self._store.get_event(join_rules_event_id)
            join_rule = join_rules_event.content.get("join_rule")
            if join_rule == JoinRules.PUBLIC or (
                room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
            if (
                join_rule == JoinRules.PUBLIC
                or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
                or (
                    room_version.msc3787_knock_restricted_join_rule
                    and join_rule == JoinRules.KNOCK_RESTRICTED
                )
            ):
                return True
@@ -405,7 +405,7 @@ class HttpPusher(Pusher):
        rejected = []
        if "rejected" in resp:
            rejected = resp["rejected"]
        else:
        if not rejected:
            self.badge_count_last_call = badge
        return rejected
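The effect of this change is easiest to see with an empty `rejected` list in the push gateway response: previously the badge bookkeeping was skipped whenever the key was present at all. A hedged mini-reproduction with plain dicts (not the pusher's real types):

```python
# Hypothetical stand-ins for the before/after control flow shown above.
def old_behaviour(resp, badge, state):
    rejected = []
    if "rejected" in resp:
        rejected = resp["rejected"]
    else:
        state["badge_count_last_call"] = badge
    return rejected


def new_behaviour(resp, badge, state):
    rejected = []
    if "rejected" in resp:
        rejected = resp["rejected"]
    if not rejected:
        state["badge_count_last_call"] = badge
    return rejected


state_old, state_new = {}, {}
old_behaviour({"rejected": []}, badge=3, state=state_old)
new_behaviour({"rejected": []}, badge=3, state=state_new)
print(state_old)  # {} - badge bookkeeping skipped, so stale counts could linger
print(state_new)  # {'badge_count_last_call': 3}
```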
@@ -203,19 +203,29 @@ class ApplicationServiceTransactionWorkerStore(
        """Get the application service state.

        Args:
            service: The service whose state to set.
            service: The service whose state to get.
        Returns:
            An ApplicationServiceState or none.
            An ApplicationServiceState, or None if we have yet to attempt any
            transactions to the AS.
        """
        result = await self.db_pool.simple_select_one(
        # if we have created transactions for this AS but not yet attempted to send
        # them, we will have a row in the table with state=NULL (recording the stream
        # positions we have processed up to).
        #
        # On the other hand, if we have yet to create any transactions for this AS at
        # all, then there will be no row for the AS.
        #
        # In either case, we return None to indicate "we don't yet know the state of
        # this AS".
        result = await self.db_pool.simple_select_one_onecol(
            "application_services_state",
            {"as_id": service.id},
            ["state"],
            retcol="state",
            allow_none=True,
            desc="get_appservice_state",
        )
        if result:
            return ApplicationServiceState(result.get("state"))
            return ApplicationServiceState(result)
        return None

    async def set_appservice_state(
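For readers unfamiliar with the two helpers: the switch changes the return shape from a row dict to the bare column value (or None with `allow_none=True`). A rough stand-in with a plain dict, hypothetical functions only, not the real `DatabasePool` methods:

```python
# `table` maps as_id -> {"state": ...}; a present row may still have state=None
# (transactions created but none attempted yet), per the comments above.
def select_one(table, as_id):
    """Old helper: the whole row as a dict of requested columns, or None if no row."""
    return table.get(as_id)


def select_one_onecol(table, as_id):
    """New helper: just the requested column's value, or None (allow_none=True)."""
    row = table.get(as_id)
    return row["state"] if row is not None else None


table = {"as_a": {"state": "up"}, "as_b": {"state": None}}
print(select_one_onecol(table, "as_a"))  # "up"
print(select_one_onecol(table, "as_b"))  # None -> "state not yet known" for this AS
print(select_one_onecol(table, "as_c"))  # None -> no row at all, same answer
```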
@@ -296,14 +306,6 @@ class ApplicationServiceTransactionWorkerStore(
        """

        def _complete_appservice_txn(txn: LoggingTransaction) -> None:
            # Set current txn_id for AS to 'txn_id'
            self.db_pool.simple_upsert_txn(
                txn,
                "application_services_state",
                {"as_id": service.id},
                {"last_txn": txn_id},
            )

            # Delete txn
            self.db_pool.simple_delete_txn(
                txn,
@@ -452,16 +454,15 @@ class ApplicationServiceTransactionWorkerStore(
                % (stream_type,)
            )

        def set_appservice_stream_type_pos_txn(txn: LoggingTransaction) -> None:
            stream_id_type = "%s_stream_id" % stream_type
            txn.execute(
                "UPDATE application_services_state SET %s = ? WHERE as_id=?"
                % stream_id_type,
                (pos, service.id),
            )

        await self.db_pool.runInteraction(
            "set_appservice_stream_type_pos", set_appservice_stream_type_pos_txn
        # this may be the first time that we're recording any state for this AS, so
        # we don't yet know if a row for it exists; hence we have to upsert here.
        await self.db_pool.simple_upsert(
            table="application_services_state",
            keyvalues={"as_id": service.id},
            values={f"{stream_type}_stream_id": pos},
            # no need to lock when emulating upsert: as_id is a unique key
            lock=False,
            desc="set_appservice_stream_type_pos",
        )
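For context on the `lock=False` comment: when the database cannot perform a native upsert, Synapse emulates one. A minimal sketch of that update-then-insert pattern (illustrative names only, not the real `DatabasePool` code); with a unique key on `as_id`, concurrent inserts cannot create duplicate rows, which is why no table lock is needed here:

```python
def emulated_upsert(txn, table, keyvalues, values):
    """Try an UPDATE first; INSERT only if no row matched."""
    where = " AND ".join(f"{k} = ?" for k in keyvalues)
    sets = ", ".join(f"{k} = ?" for k in values)
    txn.execute(
        f"UPDATE {table} SET {sets} WHERE {where}",
        list(values.values()) + list(keyvalues.values()),
    )
    if txn.rowcount == 0:
        cols = list(keyvalues) + list(values)
        placeholders = ", ".join("?" for _ in cols)
        txn.execute(
            f"INSERT INTO {table} ({', '.join(cols)}) VALUES ({placeholders})",
            list(keyvalues.values()) + list(values.values()),
        )
```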
@@ -61,7 +61,9 @@ Changes in SCHEMA_VERSION = 68:

Changes in SCHEMA_VERSION = 69:
    - We now write to `device_lists_changes_in_room` table.
    - Use sequence to generate future `application_services_txns.txn_id`s
    - We now use a PostgreSQL sequence to generate future txn_ids for
      `application_services_txns`. `application_services_state.last_txn` is no longer
      updated.

Changes in SCHEMA_VERSION = 70:
    - event_reference_hashes is no longer written to.
@@ -71,6 +73,7 @@ Changes in SCHEMA_VERSION = 70:
SCHEMA_COMPAT_VERSION = (
    # We now assume that `device_lists_changes_in_room` has been filled out for
    # recent device_list_updates.
    # ... and that `application_services_state.last_txn` is not used.
    69
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
@@ -434,16 +434,6 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
            },
        )

        # "Complete" a transaction.
        # All this really does for us is make an entry in the application_services_state
        # database table, which tracks the current stream_token per stream ID per AS.
        self.get_success(
            self.hs.get_datastores().main.complete_appservice_txn(
                0,
                interested_appservice,
            )
        )

        # Now, pretend that we receive a large burst of read receipts (300 total) that
        # all come in at once.
        for i in range(300):
@@ -14,7 +14,7 @@
import json
import os
import tempfile
from typing import List, Optional, cast
from typing import List, cast
from unittest.mock import Mock

import yaml
@@ -149,15 +149,12 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
            outfile.write(yaml.dump(as_yaml))
        self.as_yaml_files.append(as_token)

    def _set_state(
        self, id: str, state: ApplicationServiceState, txn: Optional[int] = None
    ):
    def _set_state(self, id: str, state: ApplicationServiceState):
        return self.db_pool.runOperation(
            self.engine.convert_param_style(
                "INSERT INTO application_services_state(as_id, state, last_txn) "
                "VALUES(?,?,?)"
                "INSERT INTO application_services_state(as_id, state) VALUES(?,?)"
            ),
            (id, state.value, txn),
            (id, state.value),
        )

    def _insert_txn(self, as_id, txn_id, events):
@@ -280,17 +277,6 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
            self.store.complete_appservice_txn(txn_id=txn_id, service=service)
        )

        res = self.get_success(
            self.db_pool.runQuery(
                self.engine.convert_param_style(
                    "SELECT last_txn FROM application_services_state WHERE as_id=?"
                ),
                (service.id,),
            )
        )
        self.assertEqual(1, len(res))
        self.assertEqual(txn_id, res[0][0])

        res = self.get_success(
            self.db_pool.runQuery(
                self.engine.convert_param_style(
@@ -316,14 +302,13 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
        res = self.get_success(
            self.db_pool.runQuery(
                self.engine.convert_param_style(
                    "SELECT last_txn, state FROM application_services_state WHERE as_id=?"
                    "SELECT state FROM application_services_state WHERE as_id=?"
                ),
                (service.id,),
            )
        )
        self.assertEqual(1, len(res))
        self.assertEqual(txn_id, res[0][0])
        self.assertEqual(ApplicationServiceState.UP.value, res[0][1])
        self.assertEqual(ApplicationServiceState.UP.value, res[0][0])

        res = self.get_success(
            self.db_pool.runQuery(