Compare commits

...

5 Commits

Author SHA1 Message Date
Andrew Morgan
034c5e625c Move call invite filtering logic to filter_events_for_client (#17782) 2025-11-28 17:41:56 +00:00
Andrew Morgan
778897a4e9 Add a unit test that ensures that deleting a device purges the associated refresh token (#19230) 2025-11-28 17:01:15 +00:00
Erik Johnston
78ec3043d6 Use sqlglot to properly check SQL delta files (#19224)
Rather than using dodgy regexes which keep breaking.

Also fixes a regression where it looks like we didn't fail CI if the
delta was in the wrong place.
2025-11-28 15:49:15 +00:00
Andrew Morgan
566670c363 Move RestartDelayedEventServlet to workers (#19207) 2025-11-27 16:44:17 +00:00
Andrew Morgan
52089f1f79 Prevent lint-newsfile job activating when fixing dependabot PR branches (#19220) 2025-11-27 16:15:06 +00:00
19 changed files with 249 additions and 112 deletions


@@ -110,7 +110,7 @@ jobs:
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'"
- run: scripts-dev/check_schema_delta.py --force-colors
check-lockfile:
@@ -192,7 +192,8 @@ jobs:
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
-if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
+# Only run on pull_request events, targeting develop/release branches, and skip when the PR author is dependabot[bot].
+if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
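Why the condition changed: `github.actor` is whoever triggered the workflow run, so a human pushing a fix to a dependabot branch would re-enable the job, whereas `github.event.pull_request.user.login` is always the PR's author. A toy sketch of the two gates (the parameter names are stand-ins for fields of the Actions expression context):

```python
def old_condition(base_ref: str, actor: str) -> bool:
    # Old gate: keyed on whoever triggered the workflow run.
    return (
        base_ref == "develop" or "release-" in base_ref
    ) and actor != "dependabot[bot]"


def new_condition(event_name: str, base_ref: str, pr_author: str) -> bool:
    # New gate: only pull_request events, keyed on the PR's author.
    return (
        event_name == "pull_request"
        and (base_ref == "develop" or "release-" in base_ref)
        and pr_author != "dependabot[bot]"
    )


# When a human pushes a fix to a dependabot PR branch, the triggering actor is
# the human, so the old gate would run the job; the PR author is still
# dependabot[bot], so the new gate keeps it skipped.
assert old_condition("develop", actor="some-human")
assert not new_condition("pull_request", "develop", pr_author="dependabot[bot]")
```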

changelog.d/17782.misc Normal file

@@ -0,0 +1 @@
Improve event filtering for Simplified Sliding Sync.


@@ -0,0 +1 @@
Allow restarting delayed event timeouts on workers.

changelog.d/19220.misc Normal file

@@ -0,0 +1 @@
Prevent the changelog check CI job from running on @dependabot's PRs, even when a human has modified the branch.

changelog.d/19224.misc Normal file

@@ -0,0 +1 @@
Improve robustness of the SQL schema linting in CI.

changelog.d/19230.misc Normal file

@@ -0,0 +1 @@
Add a unit test ensuring that associated refresh tokens are deleted when a device is deleted.


@@ -196,6 +196,7 @@ WORKERS_CONFIG: dict[str, dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload",
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$",
"^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",


@@ -119,6 +119,14 @@ stacking them up. You can monitor the currently running background updates with
# Upgrading to v1.144.0
## Worker support for unstable MSC4140 `/restart` endpoint
The following unstable endpoint pattern may now be routed to worker processes:
```
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events/.*/restart$
```
## Unstable mutual rooms endpoint is now behind an experimental feature flag
The unstable mutual rooms endpoint from


@@ -285,10 +285,13 @@ information.
# User directory search requests
^/_matrix/client/(r0|v3|unstable)/user_directory/search$
+# Unstable MSC4140 support
+^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$
Additionally, the following REST endpoints can be handled for GET requests:
# Push rules requests
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
+^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
# Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags
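As a quick illustration, the optional group in the combined pattern above matches both the bare collection path and the per-event restart path; a minimal sketch (`syd_abc123` is a made-up delay ID):

```python
import re

# The combined pattern from the list above: the optional group makes the
# trailing "/<delay_id>/restart" suffix optional.
PATTERN = re.compile(
    r"^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$"
)

assert PATTERN.match("/_matrix/client/unstable/org.matrix.msc4140/delayed_events")
assert PATTERN.match(
    "/_matrix/client/unstable/org.matrix.msc4140/delayed_events/syd_abc123/restart"
)
# Other per-event operations do not match and stay on the main process.
assert (
    PATTERN.match(
        "/_matrix/client/unstable/org.matrix.msc4140/delayed_events/syd_abc123/send"
    )
    is None
)
```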

poetry.lock generated

@@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
files = [
{file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
{file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
@@ -446,7 +446,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -471,7 +471,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -521,7 +521,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"all\" or extra == \"redis\""
markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"},
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"},
@@ -844,7 +844,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -982,7 +982,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -998,7 +998,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"all\" or extra == \"url-preview\""
markers = "extra == \"url-preview\" or extra == \"all\""
files = [
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
@@ -1284,7 +1284,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1526,7 +1526,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1716,7 +1716,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"all\" or extra == \"postgres\""
markers = "extra == \"postgres\" or extra == \"all\""
files = [
{file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"},
{file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"},
@@ -1734,7 +1734,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1750,7 +1750,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -2031,7 +2031,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"cache-memory\""
markers = "extra == \"cache-memory\" or extra == \"all\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2091,7 +2091,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2116,7 +2116,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2144,7 +2144,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
@@ -2548,7 +2548,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"sentry\""
markers = "extra == \"sentry\" or extra == \"all\""
files = [
{file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"},
{file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"},
@@ -2723,6 +2723,22 @@ files = [
{file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
]
+[[package]]
+name = "sqlglot"
+version = "28.0.0"
+description = "An easily customizable SQL parser and transpiler"
+optional = false
+python-versions = ">=3.9"
+groups = ["dev"]
+files = [
+{file = "sqlglot-28.0.0-py3-none-any.whl", hash = "sha256:ac1778e7fa4812f4f7e5881b260632fc167b00ca4c1226868891fb15467122e4"},
+{file = "sqlglot-28.0.0.tar.gz", hash = "sha256:cc9a651ef4182e61dac58aa955e5fb21845a5865c6a4d7d7b5a7857450285ad4"},
+]
+[package.extras]
+dev = ["duckdb (>=0.6)", "maturin (>=1.4,<2.0)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "pyperf", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing_extensions"]
+rs = ["sqlglotrs (==0.7.3)"]
[[package]]
name = "systemd-python"
version = "235"
@@ -2742,7 +2758,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2758,7 +2774,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2831,7 +2847,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"all\" or extra == \"opentracing\""
markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -2965,7 +2981,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"all\" or extra == \"redis\""
markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3211,7 +3227,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"all\" or extra == \"saml2\""
markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -3346,4 +3362,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
content-hash = "4f8d98723236eaf3d13f440dce95ec6cc3c4dc49ba3a0e45bf9cfbb51aca899c"
content-hash = "98b9062f48205a3bcc99b43ae665083d360a15d4a208927fa978df9c36fd5315"


@@ -370,6 +370,9 @@ towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
+# Used for checking the schema delta files
+sqlglot = ">=28.0.0"
[build-system]
# The upper bounds here are defensive, intended to prevent situations like


@@ -9,15 +9,11 @@ from typing import Any
import click
import git
+import sqlglot
+import sqlglot.expressions
SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
-INDEX_CREATION_REGEX = re.compile(
-r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE
-)
-INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE)
-TABLE_CREATION_REGEX = re.compile(
-r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE
-)
# The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against.
@@ -141,6 +137,9 @@ def main(force_colors: bool) -> None:
color=force_colors,
)
+# Mark this run as not successful, but continue so that we report *all*
+# errors.
+return_code = 1
else:
click.secho(
f"All deltas are in the correct folder: {current_schema_version}!",
@@ -153,60 +152,90 @@ def main(force_colors: bool) -> None:
# and delta files are also numbered in order.
changed_delta_files.sort()
-# Now check that we're not trying to create or drop indices. If we want to
-# do that they should be in background updates. The exception is when we
-# create indices on tables we've just created.
-created_tables = set()
-for delta_file in changed_delta_files:
-with open(delta_file) as fd:
-delta_lines = fd.readlines()
-for line in delta_lines:
-# Strip SQL comments
-line = line.split("--", maxsplit=1)[0]
-# Check and track any tables we create
-match = TABLE_CREATION_REGEX.search(line)
-if match:
-table_name = match.group(1)
-created_tables.add(table_name)
-# Check for dropping indices, these are always banned
-match = INDEX_DELETION_REGEX.search(line)
-if match:
-clause = match.group()
-click.secho(
-f"Found delta with index deletion: '{clause}' in {delta_file}",
-fg="red",
-bold=True,
-color=force_colors,
-)
-click.secho(
-" ↪ These should be in background updates.",
-)
-return_code = 1
-# Check for index creation, which is only allowed for tables we've
-# created.
-match = INDEX_CREATION_REGEX.search(line)
-if match:
-clause = match.group()
-table_name = match.group(1)
-if table_name not in created_tables:
-click.secho(
-f"Found delta with index creation for existing table: '{clause}' in {delta_file}",
-fg="red",
-bold=True,
-color=force_colors,
-)
-click.secho(
-" ↪ These should be in background updates (or the table should be created in the same delta).",
-)
-return_code = 1
+success = check_schema_delta(changed_delta_files, force_colors)
+if not success:
+return_code = 1
click.get_current_context().exit(return_code)
+def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool:
+"""Check that the given schema delta files do not create or drop indices
+inappropriately.
+Index creation is only allowed on tables created in the same set of deltas.
+Index deletion is never allowed and should be done in background updates.
+Returns:
+True if all checks succeeded, False if at least one failed.
+"""
+# The tables created in this delta
+created_tables = set[str]()
+# The indices created in this delta, each a tuple of (table_name, sql)
+created_indices = list[tuple[str, str]]()
+# The indices dropped in this delta, just the SQL
+dropped_indices = list[str]()
+for delta_file in delta_files:
+with open(delta_file) as fd:
+delta_contents = fd.read()
+# Infer the SQL dialect from the file extension, defaulting to Postgres.
+sql_lang = "postgres"
+if delta_file.endswith(".sqlite"):
+sql_lang = "sqlite"
+statements = sqlglot.parse(delta_contents, read=sql_lang)
+for statement in statements:
+if isinstance(statement, sqlglot.expressions.Create):
+if statement.kind == "TABLE":
+assert isinstance(statement.this, sqlglot.expressions.Schema)
+assert isinstance(statement.this.this, sqlglot.expressions.Table)
+table_name = statement.this.this.name
+created_tables.add(table_name)
+elif statement.kind == "INDEX":
+assert isinstance(statement.this, sqlglot.expressions.Index)
+table_name = statement.this.args["table"].name
+created_indices.append((table_name, statement.sql()))
+elif isinstance(statement, sqlglot.expressions.Drop):
+if statement.kind == "INDEX":
+dropped_indices.append(statement.sql())
+success = True
+for table_name, clause in created_indices:
+if table_name not in created_tables:
+click.secho(
+f"Found delta with index creation for existing table: '{clause}'",
+fg="red",
+bold=True,
+color=force_colors,
+)
+click.secho(
+" ↪ These should be in background updates (or the table should be created in the same delta).",
+)
+success = False
+for clause in dropped_indices:
+click.secho(
+f"Found delta with index deletion: '{clause}'",
+fg="red",
+bold=True,
+color=force_colors,
+)
+click.secho(
+" ↪ These should be in background updates.",
+)
+success = False
+return success
if __name__ == "__main__":
main()
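For reference, a minimal standalone sketch (the delta contents are made up) of how sqlglot classifies the statement kinds that the new `check_schema_delta` function inspects:

```python
import sqlglot
import sqlglot.expressions

# A made-up delta, purely to illustrate the three statement shapes the
# check cares about.
delta_contents = """
CREATE TABLE foo (id BIGINT PRIMARY KEY);
CREATE INDEX foo_idx ON foo (id);
DROP INDEX bar_idx;
"""

for statement in sqlglot.parse(delta_contents, read="postgres"):
    if isinstance(statement, sqlglot.expressions.Create):
        # statement.kind is "TABLE" for the first statement and "INDEX"
        # for the second.
        print("CREATE", statement.kind, "->", statement.sql())
    elif isinstance(statement, sqlglot.expressions.Drop):
        # statement.kind is "INDEX" for the DROP INDEX statement.
        print("DROP", statement.kind, "->", statement.sql())
```

Unlike the old regexes, the parser is not fooled by comments, string literals, or unusual whitespace.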


@@ -96,16 +96,18 @@ class DelayedEventsHandler:
self.notify_new_event,
)
-# Delayed events that are already marked as processed on startup might not have been
-# sent properly on the last run of the server, so unmark them to send them again.
+# Now process any delayed events that are due to be sent.
+#
+# We set `reprocess_events` to True in case any events had been
+# marked as processed, but had not yet actually been sent,
+# before the homeserver stopped.
+#
# Caveat: this will double-send delayed events that successfully persisted, but failed
# to be removed from the DB table of delayed events.
# TODO: To avoid double-sending, scan the timeline to find which of these events were
# already sent. To do so, must store delay_ids in sent events to retrieve them later.
-await self._store.unprocess_delayed_events()
events, next_send_ts = await self._store.process_timeout_delayed_events(
-self._get_current_ts()
+self._get_current_ts(), reprocess_events=True
)
if next_send_ts:
@@ -423,18 +425,23 @@ class DelayedEventsHandler:
Raises:
NotFoundError: if no matching delayed event could be found.
"""
-assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
None, request.getClientAddress().host
)
-await make_deferred_yieldable(self._initialized_from_db)
+# Note: We don't need to wait on `self._initialized_from_db` here as the
+# events this deals with are already marked as processed.
+#
+# `restart_delayed_events` will skip over such events entirely.
next_send_ts = await self._store.restart_delayed_event(
delay_id, self._get_current_ts()
)
-if self._next_send_ts_changed(next_send_ts):
-self._schedule_next_at(next_send_ts)
+# Only the main process handles sending delayed events.
+if self._is_master:
+if self._next_send_ts_changed(next_send_ts):
+self._schedule_next_at(next_send_ts)
async def send(self, request: SynapseRequest, delay_id: str) -> None:
"""


@@ -761,8 +761,6 @@ class SlidingSyncHandler:
!= Membership.JOIN,
filter_send_to_client=True,
)
-# TODO: Filter out `EventTypes.CallInvite` in public rooms,
-# see https://github.com/element-hq/synapse/issues/17359
# TODO: Handle timeline gaps (`get_timeline_gaps()`)


@@ -36,7 +36,6 @@ from synapse.api.constants import (
Direction,
EventContentFields,
EventTypes,
-JoinRules,
Membership,
)
from synapse.api.filtering import FilterCollection
@@ -790,22 +789,13 @@ class SyncHandler:
)
)
-filtered_recents = await filter_events_for_client(
+loaded_recents = await filter_events_for_client(
self._storage_controllers,
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
)
-loaded_recents = []
-for event in filtered_recents:
-if event.type == EventTypes.CallInvite:
-room_info = await self.store.get_room_with_stats(event.room_id)
-assert room_info is not None
-if room_info.join_rules == JoinRules.PUBLIC:
-continue
-loaded_recents.append(event)
log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})
loaded_recents.extend(recents)


@@ -156,10 +156,10 @@ class DelayedEventsServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-# The following can't currently be instantiated on workers.
+# Most of the following can't currently be instantiated on workers.
if hs.config.worker.worker_app is None:
UpdateDelayedEventServlet(hs).register(http_server)
CancelDelayedEventServlet(hs).register(http_server)
-RestartDelayedEventServlet(hs).register(http_server)
SendDelayedEventServlet(hs).register(http_server)
+RestartDelayedEventServlet(hs).register(http_server)
DelayedEventsServlet(hs).register(http_server)


@@ -259,7 +259,7 @@ class DelayedEventsStore(SQLBaseStore):
]
async def process_timeout_delayed_events(
-self, current_ts: Timestamp
+self, current_ts: Timestamp, reprocess_events: bool = False
) -> tuple[
list[DelayedEventDetails],
Timestamp | None,
@@ -268,6 +268,16 @@ class DelayedEventsStore(SQLBaseStore):
Marks for processing all delayed events that should have been sent prior to the provided time
that haven't already been marked as such.
+Args:
+current_ts: The current timestamp.
+reprocess_events: Whether to reprocess already-processed delayed
+events. If set to True, events which are marked as processed
+will have their `send_ts` re-checked.
+This is mainly useful for recovering from a server restart,
+which could have occurred between an event being marked as
+processed and the event actually being sent.
Returns: The details of all newly-processed delayed events,
and the send time of the next delayed event to be sent, if any.
"""
@@ -292,7 +302,12 @@ class DelayedEventsStore(SQLBaseStore):
)
)
sql_update = "UPDATE delayed_events SET is_processed = TRUE"
sql_where = "WHERE send_ts <= ? AND NOT is_processed"
sql_where = "WHERE send_ts <= ?"
if not reprocess_events:
# Skip already-processed events.
sql_where += " AND NOT is_processed"
sql_args = (current_ts,)
sql_order = "ORDER BY send_ts"
if isinstance(self.database_engine, PostgresEngine):
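Reduced to its essence, `reprocess_events` only toggles whether the WHERE clause excludes already-processed rows; a simplified sketch of just that string assembly (the real method goes on to combine these fragments in engine-specific ways):

```python
def build_where_clause(reprocess_events: bool = False) -> str:
    # Mirrors the assembly in the hunk above, reduced to the WHERE clause only.
    sql_where = "WHERE send_ts <= ?"
    if not reprocess_events:
        # Normal operation: skip events already marked as processed.
        sql_where += " AND NOT is_processed"
    return sql_where

# Periodic timeout processing only picks up unprocessed events...
assert build_where_clause() == "WHERE send_ts <= ? AND NOT is_processed"
# ...while startup recovery (reprocess_events=True) re-checks processed ones too.
assert build_where_clause(reprocess_events=True) == "WHERE send_ts <= ?"
```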


@@ -33,6 +33,7 @@ from synapse.api.constants import (
EventTypes,
EventUnsignedContentFields,
HistoryVisibility,
+JoinRules,
Membership,
)
from synapse.events import EventBase
@@ -111,7 +112,17 @@ async def filter_events_for_client(
# happen within the function.
events_before_filtering = events.copy()
# Default case is to *exclude* soft-failed events
-events = [e for e in events if not e.internal_metadata.is_soft_failed()]
+events = []
+found_call_invite = False
+for event in events_before_filtering:
+if event.internal_metadata.is_soft_failed():
+continue
+if event.type == EventTypes.CallInvite and not event.is_state():
+found_call_invite = True
+events.append(event)
client_config = await storage.main.get_admin_client_config_for_user(user_id)
if filter_send_to_client and await storage.main.is_server_admin(user_id):
if client_config.return_soft_failed_events:
@@ -139,7 +150,11 @@ async def filter_events_for_client(
[event.event_id for event in events],
)
-types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id))
+types = [_HISTORY_VIS_KEY, (EventTypes.Member, user_id)]
+if found_call_invite:
+# We need to fetch the room's join rules state to determine
+# whether to allow call invites in public rooms.
+types.append((EventTypes.JoinRules, ""))
# we exclude outliers at this point, and then handle them separately later
event_id_to_state = await storage.state.get_state_for_events(
@@ -178,6 +193,25 @@ async def filter_events_for_client(
if filtered is None:
return None
+# Filter out call invites in public rooms, as this would potentially
+# ring a lot of users.
+if event.type == EventTypes.CallInvite and not event.is_state():
+# `state_after_event` should only be None if the event is an outlier,
+# and earlier code should filter out outliers entirely.
+#
+# In addition, we only create outliers locally for out-of-band
+# invite rejections, invites received over federation, or state
+# events needed to authorise other events. None of this applies to
+# call invites.
+assert state_after_event is not None
+room_join_rules = state_after_event.get((EventTypes.JoinRules, ""))
+if (
+room_join_rules is not None
+and room_join_rules.content.get("join_rule") == JoinRules.PUBLIC
+):
+return None
# Annotate the event with the user's membership after the event.
#
# Normally we just look in `state_after_event`, but if the event is an outlier
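The new check boils down to a small predicate over the room's join-rules state; a self-contained sketch, using plain dicts and string constants as stand-ins for Synapse's own event and state-map types:

```python
# Stand-ins for Synapse's constants.
CALL_INVITE = "m.call.invite"
JOIN_RULES = "m.room.join_rules"
PUBLIC = "public"

# Simplified state map: (event_type, state_key) -> event content.
StateMap = dict[tuple[str, str], dict[str, str]]


def should_hide_call_invite(
    event_type: str, is_state: bool, state_after_event: StateMap
) -> bool:
    """True if the event is a non-state call invite in a public room."""
    if event_type != CALL_INVITE or is_state:
        return False
    join_rules = state_after_event.get((JOIN_RULES, ""))
    return join_rules is not None and join_rules.get("join_rule") == PUBLIC


# A call invite in a public room is filtered out...
assert should_hide_call_invite(
    CALL_INVITE, False, {(JOIN_RULES, ""): {"join_rule": PUBLIC}}
)
# ...but the same event in an invite-only room is kept.
assert not should_hide_call_invite(
    CALL_INVITE, False, {(JOIN_RULES, ""): {"join_rule": "invite"}}
)
```

Centralising this in `filter_events_for_client` means both the classic `/sync` path and Simplified Sliding Sync get the behaviour for free, which is why the per-handler loop in `sync.py` could be deleted above.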


@@ -449,6 +449,33 @@ class DeviceTestCase(unittest.HomeserverTestCase):
],
)
+def test_delete_device_removes_refresh_tokens(self) -> None:
+"""Deleting a device should also purge any refresh tokens for it."""
+self._record_users()
+self.get_success(
+self.store.add_refresh_token_to_user(
+user_id=user1,
+token="refresh_token",
+device_id="abc",
+expiry_ts=None,
+ultimate_session_expiry_ts=None,
+)
+)
+self.get_success(self.handler.delete_devices(user1, ["abc"]))
+remaining_refresh_token = self.get_success(
+self.store.db_pool.simple_select_one(
+table="refresh_tokens",
+keyvalues={"user_id": user1, "device_id": "abc"},
+retcols=("id",),
+desc="get_refresh_token_for_device",
+allow_none=True,
+)
+)
+self.assertIsNone(remaining_refresh_token)
class DehydrationTestCase(unittest.HomeserverTestCase):
servlets = [