Compare commits

...

28 Commits

Author SHA1 Message Date
Eric Eastwood
c75fb2eeb6 Add changelog 2025-11-20 17:36:21 -06:00
Eric Eastwood
ad94b9103e Use consistent indentation 2025-11-20 17:31:42 -06:00
Eric Eastwood
588c2b4db9 Use return instead of exit
This is useful so that the script can be sourced without exiting the calling shell.
2025-11-20 17:29:07 -06:00
Eric Eastwood
573accd0df Run complement.sh logic as function
This is useful because later we can refactor the early `exit` calls into `return` calls
so that the script can be sourced without exiting the calling shell.
2025-11-20 17:25:50 -06:00
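
Taken together, these two commits apply a standard sourcing-friendly pattern. A minimal Python analogue of the same idea (illustrative only, not code from this repo): a module that calls `sys.exit()` at import time kills whatever imports it, so the logic lives in a `main()` that returns an exit code, and only the command-line entry point actually exits the process.

```python
import sys

def main(argv: list[str]) -> int:
    """Run the tool's logic; return an exit code instead of calling sys.exit().

    Returning (rather than exiting) lets other code import this module and
    call main() without being terminated -- mirroring how complement.sh can
    now be sourced without `exit` tearing down the calling shell.
    """
    if not argv:
        print("usage: tool ARG", file=sys.stderr)
        return 1
    print(f"processing {argv[0]}")
    return 0

if __name__ == "__main__":
    # Only the command-line entry point exits the process.
    sys.exit(main(sys.argv[1:]))
```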
Devon Hudson
7a9660367a Capitalize Synapse in CHANGES.md 2025-11-18 18:04:26 -07:00
Devon Hudson
b631bf7c2a 1.143.0rc2 2025-11-18 17:38:04 -07:00
Devon Hudson
2cffd755f2 Fix duplicate poetry version from merging patch release 2025-11-18 17:31:55 -07:00
Devon Hudson
f5bf02eff6 1.143.0rc1 2025-11-18 13:20:59 -07:00
Devon Hudson
1b24a145c1 Merge branch 'master' into develop 2025-11-18 13:04:25 -07:00
Devon Hudson
46efbae4c3 1.142.1 2025-11-18 12:26:53 -07:00
Devon Hudson
d01a8abc45 Allow subpaths in MAS endpoints (#19186)
Fixes #19184

2025-11-18 12:05:41 -07:00
Devon Hudson
bc42899008 Allow subpaths in MAS endpoints (#19186)
Fixes #19184

2025-11-18 18:45:33 +00:00
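
The commit message doesn't spell out the mechanics, but a classic way subpaths get dropped from a configured base URL is `urllib.parse.urljoin` semantics. A hedged sketch of that general pitfall (illustrative only; not necessarily the exact bug fixed here):

```python
from urllib.parse import urljoin

# Base URL for an auth service mounted under a subpath (hypothetical value).
base = "https://example.com/mas"

# urljoin treats the last segment of a slash-less base as a file and drops it:
print(urljoin(base, "account"))        # https://example.com/account  (subpath lost!)

# Preserving the subpath requires a trailing slash and a relative reference:
print(urljoin(base + "/", "account"))  # https://example.com/mas/account

# Plain path concatenation is another common fix:
print(base.rstrip("/") + "/account")   # https://example.com/mas/account
```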
Devon Hudson
322481cd2d Run background updates on all databases (#19181)
Fixes #18322 

This PR changes Synapse startup to run background updates against all
databases instead of just the "main" database.
This follows [what the admin API
does](https://github.com/element-hq/synapse/blob/develop/synapse/rest/admin/background_updates.py#L71-L77).

See the above linked issue for further details of why this is
beneficial.

2025-11-17 15:32:21 +00:00
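
A hedged sketch of the shape of this change, using illustrative names rather than Synapse's actual internals:

```python
import asyncio

# Hypothetical sketch: kick off background updates on every configured
# database at startup, not just "main". Names here are illustrative.

class Database:
    def __init__(self, name: str) -> None:
        self.name = name

    async def run_background_updates(self) -> None:
        print(f"running background updates on {self.name!r}")

async def start_background_updates(databases: list[Database]) -> None:
    # Before: only the first ("main") database was started.
    # After: every database gets its background updater started.
    for db in databases:
        await db.run_background_updates()

# Synapse can split storage across databases (e.g. "main" and "state").
asyncio.run(start_background_updates([Database("main"), Database("state")]))
```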
dependabot[bot]
34d93c96ed Bump click from 8.1.8 to 8.3.1 (#19195)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-17 12:09:49 +00:00
dependabot[bot]
ce65b5c8ba Bump sentry-sdk from 2.43.0 to 2.44.0 (#19197)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-17 12:09:38 +00:00
dependabot[bot]
26ddedb753 Bump ruff from 0.14.3 to 0.14.5 (#19196)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-17 12:09:26 +00:00
dependabot[bot]
fca80e2eaa Bump tomli from 2.2.1 to 2.3.0 (#19194)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-17 11:40:03 +00:00
dependabot[bot]
19251fc4cf Bump bytes from 1.10.1 to 1.11.0 (#19193)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-17 11:38:35 +00:00
Eric Eastwood
edc0de9fa0 Fix bad deferred logcontext handling (#19180)
These aren't issues I've personally experienced, but I went around the
codebase looking for all of the Deferred `.callback`, `.errback`, and
`.cancel` calls and wrapped them with `PreserveLoggingContext()`.

Spawning from wanting to solve
https://github.com/element-hq/synapse/issues/19165 but unconfirmed
whether this has any effect.

To explain the fix, see the [*Deferred
callbacks*](https://github.com/element-hq/synapse/blob/3b59ac3b69/docs/log_contexts.md#deferred-callbacks)
section of our logcontext docs for more info (specifically using
solution 2).
2025-11-14 11:21:15 -06:00
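
For reference, the pattern being applied is "solution 2" from the linked logcontext docs: reset to the sentinel context before firing a Deferred. A minimal sketch (assumes Synapse is installed):

```python
from twisted.internet import defer
from synapse.logging.context import PreserveLoggingContext

def fire_deferred(d: defer.Deferred, result: object) -> None:
    # Drop back to the sentinel context before firing, so the Deferred's
    # waiting callbacks don't run in (and then clobber) our current
    # logcontext.
    with PreserveLoggingContext():
        d.callback(result)
```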
Andrew Morgan
8da8d4b4f5 Remove explicit python 3.8/9 skips (#19177)
Co-authored-by: Devon Hudson <devon.dmytro@gmail.com>
2025-11-14 11:38:39 +00:00
Eric Eastwood
408a05ebbc Fix potential lost logcontext when PerDestinationQueue.shutdown(...) (#19178)
Spawning from looking at the logs in
https://github.com/element-hq/synapse/issues/19165#issuecomment-3527452941,
which mention the `federation_transaction_transmission_loop`. I don't
think it's the source of the lost logcontext that the person in the issue
is experiencing, because this only applies when you try to `shutdown` the
homeserver.

Problem code introduced in
https://github.com/element-hq/synapse/pull/18828

To explain the fix, see the [*Deferred
callbacks*](https://github.com/element-hq/synapse/blob/3b59ac3b69/docs/log_contexts.md#deferred-callbacks)
section of our logcontext docs for more info (specifically using
solution 2).
2025-11-13 15:17:15 -06:00
Devon Hudson
5d545d1626 Remove support for PostgreSQL 13 (#19170)
This PR removes support for PostgreSQL 13, as it reaches end of life
upstream (tomorrow, at the time of writing).
Uses https://github.com/element-hq/synapse/pull/18034 as a reference of
where to look, and also found a few other places that needed updating.
I didn't see anywhere in Complement that needs updating.
There is a companion SyTest PR dropping PostgreSQL 13 over there:
https://github.com/matrix-org/sytest/pull/1418

2025-11-13 19:38:59 +00:00
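
The enforcement side of a change like this is typically a version gate in the PostgreSQL engine setup. A rough sketch of the idea (not the exact Synapse code):

```python
def check_postgres_version(server_version_num: int) -> None:
    # PostgreSQL reports e.g. 140011 for 14.11 via `SHOW server_version_num`.
    # Bumping the floor from 130000 to 140000 is the core of dropping v13.
    if server_version_num < 140000:
        raise RuntimeError("Synapse requires PostgreSQL 14 or above.")

check_postgres_version(140011)   # ok
# check_postgres_version(130004) # would raise
```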
Andrew Ferrazzutti
9e23cded8f MSC4140: Remove auth from delayed event management endpoints (#19152)
As per recent proposals in MSC4140, remove authentication for
restarting/cancelling/sending a delayed event, and give each of those
actions its own endpoint. (The original consolidated endpoint is still
supported for backwards compatibility.)

---------

Co-authored-by: Half-Shot <will@half-shot.uk>
2025-11-13 18:56:17 +00:00
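
Concretely, the change reads roughly like this on the wire. The paths below are hedged from the commit description and changelog; consult MSC4140 for the authoritative routes:

```python
# Illustrative only: hypothetical delayed-event ID and hedged paths.
BASE = "https://homeserver.example/_matrix/client/v1"
delay_id = "syd_abcdef"

# Before: one consolidated endpoint, with the action in the JSON body
# (still supported for backwards compatibility, and requiring auth):
old = ("POST", f"{BASE}/delayed_events/{delay_id}", {"action": "cancel"})

# After: each action gets its own endpoint, named in the path, without
# requiring an access token:
new = [
    ("POST", f"{BASE}/delayed_events/{delay_id}/{action}", None)
    for action in ("send", "cancel", "restart")
]

for method, url, body in [old, *new]:
    print(method, url, body or "")
```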
Eric Eastwood
4494cc0694 Point out which event caused the exception when checking MSC4293 redactions (#19169)
Spawning from looking at the stack trace in
https://github.com/element-hq/synapse/issues/19128 which has no useful
information on how to dig in deeper.
2025-11-13 12:08:22 -06:00
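
The underlying technique is simply to re-raise with the offending event attached. A generic sketch (not the actual Synapse code):

```python
def check_redactions(events: list[dict]) -> None:
    for event in events:
        try:
            check_one(event)  # hypothetical per-event check
        except Exception as e:
            # Attach the event ID so the stack trace points at the culprit.
            raise RuntimeError(
                f"Failed to check redaction for event {event.get('event_id')}"
            ) from e

def check_one(event: dict) -> None:
    if "content" not in event:
        raise ValueError("event has no content")

check_redactions([{"event_id": "$good", "content": {}}])
```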
Eric Eastwood
47d24bd234 Add debug logs to track Clock callbacks (#19173)
Spawning from wanting to find the source of a `Clock.call_later()`
callback, https://github.com/element-hq/synapse/issues/19165
2025-11-13 12:07:23 -06:00
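
One way to implement this kind of tracking is to capture the call site when the callback is scheduled. A hedged sketch of the idea using only the standard library:

```python
import logging
import threading
import traceback

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("clock")

def call_later_with_debug(delay: float, callback, *args) -> threading.Timer:
    # Record where the callback was scheduled from, so a later log line can
    # point back at the caller instead of an anonymous timer.
    caller = traceback.extract_stack(limit=2)[0]
    logger.debug(
        "call_later(%r, delay=%s) scheduled at %s:%d",
        callback, delay, caller.filename, caller.lineno,
    )
    timer = threading.Timer(delay, callback, args)
    timer.start()
    return timer

call_later_with_debug(0.1, print, "fired")
```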
Eric Eastwood
b9dda0ff22 Restore printing sentinel for log_record.request (#19172)
This was unintentionally changed in
https://github.com/element-hq/synapse/pull/19068.

There is no real bug here. Without this PR, we just printed an empty
string for the `sentinel` logcontext, whereas the prior behavior was to
print `sentinel`, which this PR restores.

Found while staring at the logs in
https://github.com/element-hq/synapse/issues/19165


### Reproduction strategy

1. Configure Synapse with [logging](https://github.com/element-hq/synapse/blob/df802882bb/docs/sample_log_config.yaml)
2. Start Synapse: `poetry run synapse_homeserver --config-path homeserver.yaml`
3. Notice the `asyncio - 64 - DEBUG - - Using selector: EpollSelector`
log line (note the empty string between the dashes, `- -`)
4. With this PR, the log line will be `asyncio - 64 - DEBUG - sentinel -
Using selector: EpollSelector` (note `sentinel`)
2025-11-13 09:57:56 -06:00
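
Why the log line changes shape: the sample log format includes a `%(request)s` field filled in from the current logcontext. A hedged sketch of how an unset context can be rendered as the literal `sentinel` instead of an empty string:

```python
import logging

class RequestContextFilter(logging.Filter):
    # Sketch of the restored behaviour: when no logcontext is active, stamp
    # the record with the literal string "sentinel" instead of "".
    def filter(self, record: logging.LogRecord) -> bool:
        if not getattr(record, "request", None):
            record.request = "sentinel"
        return True

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "%(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
))
handler.addFilter(RequestContextFilter())

logger = logging.getLogger("asyncio")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.debug("Using selector: EpollSelector")
# -> asyncio - <lineno> - DEBUG - sentinel - Using selector: EpollSelector
```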
reivilibre
938c97416d Add a shortcut return when there are no events to purge. (#19093)
Fixes: #13417

---------

Signed-off-by: Olivier 'reivilibre' <oliverw@matrix.org>
2025-11-13 14:26:37 +00:00
Jason Volk
e67ba69f20 Provide same servers list in s2s alias results as c2s. (#18970)
Signed-off-by: Jason Volk <jason@zemos.net>
Co-authored-by: dasha_uwu <dasha@linuxping.win>
2025-11-13 11:12:03 +00:00
49 changed files with 870 additions and 660 deletions

View File

@@ -72,7 +72,7 @@ trial_postgres_tests = [
{
"python-version": "3.10",
"database": "postgres",
"postgres-version": "13",
"postgres-version": "14",
"extras": "all",
},
{

View File

@@ -617,7 +617,7 @@ jobs:
matrix:
include:
- python-version: "3.10"
postgres-version: "13"
postgres-version: "14"
- python-version: "3.14"
postgres-version: "17"

View File

@@ -1,3 +1,83 @@
# Synapse 1.143.0rc2 (2025-11-18)

## Internal Changes

- Fixes docker image creation in the release workflow.

# Synapse 1.143.0rc1 (2025-11-18)

## Dropping support for PostgreSQL 13

In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped support for PostgreSQL 13, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 14+.

## Features

- Support multiple config files in `register_new_matrix_user`. ([\#18784](https://github.com/element-hq/synapse/issues/18784))
- Remove authentication from `POST /_matrix/client/v1/delayed_events`, and allow calling this endpoint with the update action to take (`send`/`cancel`/`restart`) in the request path instead of the body. ([\#19152](https://github.com/element-hq/synapse/issues/19152))

## Bugfixes

- Fixed a longstanding bug where background updates were only run on the `main` database. ([\#19181](https://github.com/element-hq/synapse/issues/19181))
- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))
- Fix the SQLite-to-PostgreSQL migration script to correctly migrate a boolean column in the `delayed_events` table. ([\#19155](https://github.com/element-hq/synapse/issues/19155))

## Improved Documentation

- Improve documentation around streams, particularly ID generators and adding new streams. ([\#18943](https://github.com/element-hq/synapse/issues/18943))

## Deprecations and Removals

- Remove support for PostgreSQL 13. ([\#19170](https://github.com/element-hq/synapse/issues/19170))

## Internal Changes

- Provide additional servers with federation room directory results. ([\#18970](https://github.com/element-hq/synapse/issues/18970))
- Add a shortcut return when there are no events to purge. ([\#19093](https://github.com/element-hq/synapse/issues/19093))
- Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10. ([\#19111](https://github.com/element-hq/synapse/issues/19111))
- Reduce cardinality of `synapse_storage_events_persisted_events_sep_total` metric by removing `origin_entity` label. This also separates out events sent by local application services by changing the `origin_type` for such events to `application_service`. The `type` field also only tracks common event types, and anything else is bucketed under `*other*`. ([\#19133](https://github.com/element-hq/synapse/issues/19133), [\#19168](https://github.com/element-hq/synapse/issues/19168))
- Run trial tests on Python 3.14 for PRs. ([\#19135](https://github.com/element-hq/synapse/issues/19135))
- Update `pyproject.toml` project metadata to be compatible with standard Python packaging tooling. ([\#19137](https://github.com/element-hq/synapse/issues/19137))
- Minor speed up of processing of inbound replication. ([\#19138](https://github.com/element-hq/synapse/issues/19138), [\#19145](https://github.com/element-hq/synapse/issues/19145), [\#19146](https://github.com/element-hq/synapse/issues/19146))
- Ignore recent Python language refactors from git blame (`.git-blame-ignore-revs`). ([\#19150](https://github.com/element-hq/synapse/issues/19150))
- Bump lower bounds of dependencies `parameterized` to `0.9.0` and `idna` to `3.3` as those are the first to advertise support for Python 3.10. ([\#19167](https://github.com/element-hq/synapse/issues/19167))
- Point out which event caused the exception when checking [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) redactions. ([\#19169](https://github.com/element-hq/synapse/issues/19169))
- Restore printing `sentinel` for the log record `request` when no logcontext is active. ([\#19172](https://github.com/element-hq/synapse/issues/19172))
- Add debug logs to track `Clock` utilities. ([\#19173](https://github.com/element-hq/synapse/issues/19173))
- Remove explicit python version skips in `cibuildwheel` config as it's no longer required after [#19137](https://github.com/element-hq/synapse/pull/19137). ([\#19177](https://github.com/element-hq/synapse/issues/19177))
- Fix potential lost logcontext when `PerDestinationQueue.shutdown(...)` is called. ([\#19178](https://github.com/element-hq/synapse/issues/19178))
- Fix bad deferred logcontext handling across the codebase. ([\#19180](https://github.com/element-hq/synapse/issues/19180))

### Updates to locked dependencies

* Bump bytes from 1.10.1 to 1.11.0. ([\#19193](https://github.com/element-hq/synapse/issues/19193))
* Bump click from 8.1.8 to 8.3.1. ([\#19195](https://github.com/element-hq/synapse/issues/19195))
* Bump cryptography from 43.0.3 to 45.0.7. ([\#19159](https://github.com/element-hq/synapse/issues/19159))
* Bump docker/metadata-action from 5.8.0 to 5.9.0. ([\#19161](https://github.com/element-hq/synapse/issues/19161))
* Bump pydantic from 2.12.3 to 2.12.4. ([\#19158](https://github.com/element-hq/synapse/issues/19158))
* Bump pyo3-log from 0.13.1 to 0.13.2. ([\#19156](https://github.com/element-hq/synapse/issues/19156))
* Bump ruff from 0.14.3 to 0.14.5. ([\#19196](https://github.com/element-hq/synapse/issues/19196))
* Bump sentry-sdk from 2.34.1 to 2.43.0. ([\#19157](https://github.com/element-hq/synapse/issues/19157))
* Bump sentry-sdk from 2.43.0 to 2.44.0. ([\#19197](https://github.com/element-hq/synapse/issues/19197))
* Bump tomli from 2.2.1 to 2.3.0. ([\#19194](https://github.com/element-hq/synapse/issues/19194))
* Bump types-netaddr from 1.3.0.20240530 to 1.3.0.20251108. ([\#19160](https://github.com/element-hq/synapse/issues/19160))

# Synapse 1.142.1 (2025-11-18)

## Bugfixes

- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))

# Synapse 1.142.0 (2025-11-11)

## Dropped support for Python 3.9

Cargo.lock generated
View File

@@ -73,9 +73,9 @@ checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
[[package]]
name = "bytes"
version = "1.10.1"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
[[package]]
name = "cc"

View File

@@ -1 +0,0 @@
Support multiple config files in `register_new_matrix_user`.

View File

@@ -1 +0,0 @@
Improve documentation around streams, particularly ID generators and adding new streams.

View File

@@ -1 +0,0 @@
Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10.

View File

@@ -1 +0,0 @@
Reduce cardinality of `synapse_storage_events_persisted_events_sep_total` metric by removing `origin_entity` label. This also separates out events sent by local application services by changing the `origin_type` for such events to `application_service`. The `type` field also only tracks common event types, and anything else is bucketed under `*other*`.

View File

@@ -1 +0,0 @@
Run trial tests on Python 3.14 for PRs.

View File

@@ -1 +0,0 @@
Update `pyproject.toml` project metadata to be compatible with standard Python packaging tooling.

View File

@@ -1 +0,0 @@
Minor speed up of processing of inbound replication.

View File

@@ -1 +0,0 @@
Minor speed up of processing of inbound replication.

View File

@@ -1 +0,0 @@
Minor speed up of processing of inbound replication.

View File

@@ -1 +0,0 @@
Ignore recent Python language refactors from git blame (`.git-blame-ignore-revs`).

View File

@@ -1 +0,0 @@
Let the SQLite-to-PostgreSQL migration script correctly migrate a boolean column in the `delayed_events` table.

View File

@@ -1 +0,0 @@
Bump lower bounds of dependencies `parameterized` to `0.9.0` and `idna` to `3.3` as those are the first to advertise support for Python 3.10.

View File

@@ -1 +0,0 @@
Reduce cardinality of `synapse_storage_events_persisted_events_sep_total` metric by removing `origin_entity` label. This also separates out events sent by local application services by changing the `origin_type` for such events to `application_service`. The `type` field also only tracks common event types, and anything else is bucketed under `*other*`.

changelog.d/19209.misc Normal file
View File

@@ -0,0 +1 @@
Refactor `scripts-dev/complement.sh` logic to avoid `exit`, so that it can be sourced from other scripts (composable).

debian/changelog vendored
View File

@@ -1,3 +1,21 @@
matrix-synapse-py3 (1.143.0~rc2) stable; urgency=medium

  * New Synapse release 1.143.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 17:36:08 -0700

matrix-synapse-py3 (1.143.0~rc1) stable; urgency=medium

  * New Synapse release 1.143.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 13:08:39 -0700

matrix-synapse-py3 (1.142.1) stable; urgency=medium

  * New Synapse release 1.142.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Nov 2025 12:25:23 -0700

matrix-synapse-py3 (1.142.0) stable; urgency=medium

  * New Synapse release 1.142.0.

View File

@@ -11,7 +11,7 @@ ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=trixie
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
FROM docker.io/library/postgres:14-${DEBIAN_VERSION} AS postgres_base
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
@@ -26,7 +26,7 @@ RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PATH="${PATH}:/usr/lib/postgresql/14/bin"
ENV PGDATA=/var/lib/postgresql/data
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.

View File

@@ -117,6 +117,14 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.143.0

## Dropping support for PostgreSQL 13

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 14+.

# Upgrading to v1.142.0

## Python 3.10+ is now required

poetry.lock generated
View File

@@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
files = [
{file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
{file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
@@ -349,14 +349,14 @@ files = [
[[package]]
name = "click"
version = "8.1.8"
version = "8.3.1"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.10"
groups = ["dev"]
files = [
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
{file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"},
{file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"},
]
[package.dependencies]
@@ -454,7 +454,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -479,7 +479,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -529,7 +529,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"},
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"},
@@ -852,7 +852,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -990,7 +990,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1006,7 +1006,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"url-preview\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"url-preview\""
files = [
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
@@ -1292,7 +1292,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1534,7 +1534,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1724,7 +1724,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"postgres\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"postgres\""
files = [
{file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"},
{file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"},
@@ -1742,7 +1742,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1758,7 +1758,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -2039,7 +2039,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"cache-memory\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"cache-memory\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2099,7 +2099,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2124,7 +2124,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2152,7 +2152,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
@@ -2489,31 +2489,31 @@ files = [
[[package]]
name = "ruff"
version = "0.14.3"
version = "0.14.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.14.3-py3-none-linux_armv6l.whl", hash = "sha256:876b21e6c824f519446715c1342b8e60f97f93264012de9d8d10314f8a79c371"},
{file = "ruff-0.14.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b6fd8c79b457bedd2abf2702b9b472147cd860ed7855c73a5247fa55c9117654"},
{file = "ruff-0.14.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:71ff6edca490c308f083156938c0c1a66907151263c4abdcb588602c6e696a14"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:786ee3ce6139772ff9272aaf43296d975c0217ee1b97538a98171bf0d21f87ed"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cd6291d0061811c52b8e392f946889916757610d45d004e41140d81fb6cd5ddc"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a497ec0c3d2c88561b6d90f9c29f5ae68221ac00d471f306fa21fa4264ce5fcd"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e231e1be58fc568950a04fbe6887c8e4b85310e7889727e2b81db205c45059eb"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:469e35872a09c0e45fecf48dd960bfbce056b5db2d5e6b50eca329b4f853ae20"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d6bc90307c469cb9d28b7cfad90aaa600b10d67c6e22026869f585e1e8a2db0"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2f8a0bbcffcfd895df39c9a4ecd59bb80dca03dc43f7fb63e647ed176b741e"},
{file = "ruff-0.14.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:678fdd7c7d2d94851597c23ee6336d25f9930b460b55f8598e011b57c74fd8c5"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1ec1ac071e7e37e0221d2f2dbaf90897a988c531a8592a6a5959f0603a1ecf5e"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afcdc4b5335ef440d19e7df9e8ae2ad9f749352190e96d481dc501b753f0733e"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:7bfc42f81862749a7136267a343990f865e71fe2f99cf8d2958f684d23ce3dfa"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a65e448cfd7e9c59fae8cf37f9221585d3354febaad9a07f29158af1528e165f"},
{file = "ruff-0.14.3-py3-none-win32.whl", hash = "sha256:f3d91857d023ba93e14ed2d462ab62c3428f9bbf2b4fbac50a03ca66d31991f7"},
{file = "ruff-0.14.3-py3-none-win_amd64.whl", hash = "sha256:d7b7006ac0756306db212fd37116cce2bd307e1e109375e1c6c106002df0ae5f"},
{file = "ruff-0.14.3-py3-none-win_arm64.whl", hash = "sha256:26eb477ede6d399d898791d01961e16b86f02bc2486d0d1a7a9bb2379d055dc1"},
{file = "ruff-0.14.3.tar.gz", hash = "sha256:4ff876d2ab2b161b6de0aa1f5bd714e8e9b4033dc122ee006925fbacc4f62153"},
{file = "ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594"},
{file = "ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72"},
{file = "ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19"},
{file = "ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367"},
{file = "ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b"},
{file = "ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621"},
{file = "ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4"},
{file = "ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1"},
]
[[package]]
@@ -2551,15 +2551,15 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.43.0"
version = "2.44.0"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"sentry\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"sentry\""
files = [
{file = "sentry_sdk-2.43.0-py2.py3-none-any.whl", hash = "sha256:4aacafcf1756ef066d359ae35030881917160ba7f6fc3ae11e0e58b09edc2d5d"},
{file = "sentry_sdk-2.43.0.tar.gz", hash = "sha256:52ed6e251c5d2c084224d73efee56b007ef5c2d408a4a071270e82131d336e20"},
{file = "sentry_sdk-2.44.0-py2.py3-none-any.whl", hash = "sha256:9e36a0372b881e8f92fdbff4564764ce6cec4b7f25424d0a3a8d609c9e4651a7"},
{file = "sentry_sdk-2.44.0.tar.gz", hash = "sha256:5b1fe54dfafa332e900b07dd8f4dfe35753b64e78e7d9b1655a28fd3065e2493"},
]
[package.dependencies]
@@ -2749,7 +2749,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2765,7 +2765,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2780,44 +2780,54 @@ twisted = ["twisted"]
[[package]]
name = "tomli"
version = "2.2.1"
version = "2.3.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
{file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
{file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
{file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
{file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
{file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
{file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
{file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
{file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
{file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
{file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
{file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
{file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
{file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"},
{file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"},
{file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"},
{file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"},
{file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"},
{file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"},
{file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"},
{file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"},
{file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"},
{file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"},
{file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"},
{file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"},
{file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"},
{file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"},
{file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"},
{file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"},
{file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"},
{file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"},
{file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"},
{file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"},
{file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"},
{file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"},
{file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"},
{file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"},
{file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"},
{file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"},
{file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"},
{file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"},
{file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"},
{file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"},
{file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"},
{file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"},
{file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"},
{file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"},
{file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"},
{file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"},
{file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"},
{file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"},
{file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"},
{file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"},
{file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"},
{file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"},
]
markers = {main = "python_version < \"3.14\""}
@@ -2828,7 +2838,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -2962,7 +2972,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3208,7 +3218,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -3343,4 +3353,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
content-hash = "fd472001e409dd3a2a9f0af980a5630fa9edb98fc4b73caa68802a3fd0df5d5f"
content-hash = "4f8d98723236eaf3d13f440dce95ec6cc3c4dc49ba3a0e45bf9cfbb51aca899c"

View File

@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
version = "1.142.0"
version = "1.143.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
@@ -328,7 +328,7 @@ generate-setup-file = true
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.14.3"
ruff = "0.14.5"
# Typechecking
lxml-stubs = ">=0.4.0"
@@ -392,10 +392,8 @@ build-backend = "poetry.core.masonry.api"
#
# We skip:
# - free-threaded cpython builds: these are not currently supported.
# - cp38: Python 3.8 is end-of-life.
# - cp39: Python 3.9 is end-of-life.
# - i686: We don't support 32-bit platforms.
skip = "cp3??t-* cp38-* cp39-* *i686*"
skip = "cp3??t-* *i686*"
# Enable non-default builds. See the list of available options:
# https://cibuildwheel.pypa.io/en/stable/options#enable
#

View File

@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.142/synapse-config.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json
type: object
properties:
modules:

View File

@@ -72,153 +72,154 @@ For help on arguments to 'go test', run 'go help testflag'.
EOF
}
# parse our arguments
skip_docker_build=""
skip_complement_run=""
while [ $# -ge 1 ]; do
main() {
# parse our arguments
skip_docker_build=""
skip_complement_run=""
while [ $# -ge 1 ]; do
arg=$1
case "$arg" in
"-h")
usage
exit 1
;;
"-f"|"--fast")
skip_docker_build=1
;;
"--build-only")
skip_complement_run=1
;;
"-e"|"--editable")
use_editable_synapse=1
;;
"--rebuild-editable")
rebuild_editable_synapse=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
"-h")
usage
return 1
;;
"-f"|"--fast")
skip_docker_build=1
;;
"--build-only")
skip_complement_run=1
;;
"-e"|"--editable")
use_editable_synapse=1
;;
"--rebuild-editable")
rebuild_editable_synapse=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
esac
shift
done
done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
export CONTAINER_RUNTIME=podman
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
export BUILDAH_FORMAT=docker
export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
export CONTAINER_RUNTIME=docker
fi
# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
export CONTAINER_RUNTIME=podman
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
export BUILDAH_FORMAT=docker
export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
export CONTAINER_RUNTIME=docker
fi
# Change to the repository root
cd "$(dirname $0)/.."
# Change to the repository root
cd "$(dirname $0)/.."
# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
COMPLEMENT_REF=${COMPLEMENT_REF:-main}
echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
tar -xzf ${COMPLEMENT_REF}.tar.gz
COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
COMPLEMENT_REF=${COMPLEMENT_REF:-main}
echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
tar -xzf ${COMPLEMENT_REF}.tar.gz
COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
if [ -n "$use_editable_synapse" ]; then
if [ -n "$use_editable_synapse" ]; then
if [[ -e synapse/synapse_rust.abi3.so ]]; then
# In an editable install, back up the host's compiled Rust module to prevent
# inconvenience; the container will overwrite the module with its own copy.
mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
# And restore it on exit:
synapse_pkg=`realpath synapse`
trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
# In an editable install, back up the host's compiled Rust module to prevent
# inconvenience; the container will overwrite the module with its own copy.
mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
# And restore it on exit:
synapse_pkg=`realpath synapse`
trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
fi
editable_mount="$(realpath .):/editable-src:z"
if [ -n "$rebuild_editable_synapse" ]; then
unset skip_docker_build
unset skip_docker_build
elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# First set up the module in the right place for an editable installation.
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
# First set up the module in the right place for an editable installation.
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
unset skip_docker_build
fi
if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
unset skip_docker_build
fi
fi
fi
fi
if [ -z "$skip_docker_build" ]; then
if [ -z "$skip_docker_build" ]; then
if [ -n "$use_editable_synapse" ]; then
# Build a special image designed for use in development with editable
# installs.
$CONTAINER_RUNTIME build -t synapse-editable \
-f "docker/editable.Dockerfile" .
# Build a special image designed for use in development with editable
# installs.
$CONTAINER_RUNTIME build -t synapse-editable \
-f "docker/editable.Dockerfile" .
$CONTAINER_RUNTIME build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
$CONTAINER_RUNTIME build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
# Prepare the Rust module
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
# Prepare the Rust module
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
fi
fi
fi
if [ -n "$skip_complement_run" ]; then
echo "Skipping Complement run as requested."
exit
fi
if [ -n "$skip_complement_run" ]; then
echo "Skipping Complement run as requested."
return 0
fi
export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
export COMPLEMENT_HOST_MOUNTS="$editable_mount"
fi
fi
extra_test_args=()
extra_test_args=()
test_packages=(
test_packages=(
./tests/csapi
./tests
./tests/msc3874
@@ -231,71 +232,80 @@ test_packages=(
./tests/msc4140
./tests/msc4155
./tests/msc4306
)
)
# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
export COMPLEMENT_ENABLE_DIRTY_RUNS=1
# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
export COMPLEMENT_ENABLE_DIRTY_RUNS=1
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
# It takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
# It takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
# Pass through the workers defined. If none, it will be an empty string
export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
# Pass through the workers defined. If none, it will be an empty string
export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
# And provide some more configuration to complement.
# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
else
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
if [[ -n "$POSTGRES" ]]; then
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
# And provide some more configuration to complement.
# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
if [[ -n "$POSTGRES" ]]; then
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
# Enable the Twisted asyncio reactor
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
# Allow logging sensitive things (currently SQL queries & parameters).
# (This won't have any effect if we're not logging at DEBUG level overall.)
# Since this is just a test suite, this is fine and won't reveal anyone's
# personal information
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
}
main "$@"
# For any non-zero exit code (indicating some sort of error happened), we want to exit
# with that code.
exit_code=$?
if [ $exit_code -ne 0 ]; then
exit $exit_code
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
# Enable the Twisted asyncio reactor
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
# Allow logging sensitive things (currently SQL queries & parameters).
# (This won't have any effect if we're not logging at DEBUG level overall.)
# Since this is just a test suite, this is fine and won't reveal anyone's
# personal information
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"

View File

@@ -58,6 +58,7 @@ from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.delayed_events import DelayedEventsStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
@@ -273,6 +274,7 @@ class Store(
RelationsWorkerStore,
EventFederationWorkerStore,
SlidingSyncStore,
DelayedEventsStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

View File

@@ -17,7 +17,6 @@ from typing import TYPE_CHECKING
from urllib.parse import urlencode
from pydantic import (
AnyHttpUrl,
BaseModel,
ConfigDict,
StrictBool,
@@ -147,33 +146,13 @@ class MasDelegatedAuth(BaseAuth):
@property
def _metadata_url(self) -> str:
return str(
AnyHttpUrl.build(
scheme=self._config.endpoint.scheme,
username=self._config.endpoint.username,
password=self._config.endpoint.password,
host=self._config.endpoint.host or "",
port=self._config.endpoint.port,
path=".well-known/openid-configuration",
query=None,
fragment=None,
)
return (
f"{str(self._config.endpoint).rstrip('/')}/.well-known/openid-configuration"
)
@property
def _introspection_endpoint(self) -> str:
return str(
AnyHttpUrl.build(
scheme=self._config.endpoint.scheme,
username=self._config.endpoint.username,
password=self._config.endpoint.password,
host=self._config.endpoint.host or "",
port=self._config.endpoint.port,
path="oauth2/introspect",
query=None,
fragment=None,
)
)
return f"{str(self._config.endpoint).rstrip('/')}/oauth2/introspect"
async def _load_metadata(self) -> ServerMetadata:
response = await self._http_client.get_json(self._metadata_url)
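This change is the crux of subpath support: pydantic's AnyHttpUrl.build() takes an absolute path, so any path component already present on the configured endpoint (e.g. a MAS hosted behind https://example.com/auth/) was silently dropped, whereas joining onto the stringified endpoint preserves it. A minimal standalone sketch, assuming pydantic v2 and a made-up endpoint URL:

from pydantic import AnyHttpUrl

endpoint = AnyHttpUrl("https://mas.example.com/auth/path/")

# Old approach: `path=` replaces the endpoint's entire path component,
# so the "/auth/path" prefix is lost.
rebuilt = AnyHttpUrl.build(
    scheme=endpoint.scheme,
    host=endpoint.host or "",
    port=endpoint.port,
    path="oauth2/introspect",
)
# str(rebuilt) -> "https://mas.example.com/oauth2/introspect" (subpath lost)

# New approach: join onto whatever path was configured.
joined = f"{str(endpoint).rstrip('/')}/oauth2/introspect"
# joined -> "https://mas.example.com/auth/path/oauth2/introspect"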

View File

@@ -450,7 +450,8 @@ async def start(
await _base.start(hs, freeze=freeze)
# TODO: Feels like this should be moved somewhere else.
hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
for db in hs.get_datastores().databases:
db.updates.start_doing_background_updates()
def start_reactor(

View File

@@ -65,8 +65,6 @@ from typing import (
Sequence,
)
from twisted.internet.interfaces import IDelayedCall
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
@@ -78,7 +76,7 @@ from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.storage.databases.main import DataStore
from synapse.types import DeviceListUpdates, JsonMapping
from synapse.util.clock import Clock
from synapse.util.clock import Clock, DelayedCallWrapper
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -503,7 +501,7 @@ class _Recoverer:
self.service = service
self.callback = callback
self.backoff_counter = 1
self.scheduled_recovery: IDelayedCall | None = None
self.scheduled_recovery: DelayedCallWrapper | None = None
def recover(self) -> None:
delay = 2**self.backoff_counter

View File

@@ -41,6 +41,7 @@ from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import SERVER_NAME_LABEL, sent_transactions_counter
from synapse.types import JsonDict, ReadReceipt
@@ -186,7 +187,8 @@ class PerDestinationQueue:
self._transaction_manager.shutdown()
try:
if self.active_transmission_loop is not None:
self.active_transmission_loop.cancel()
with PreserveLoggingContext():
self.active_transmission_loop.cancel()
except Exception:
pass

View File

@@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import ShadowBanError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
@@ -29,11 +30,9 @@ from synapse.replication.http.delayed_events import (
)
from synapse.storage.databases.main.delayed_events import (
DelayedEventDetails,
DelayID,
EventType,
StateKey,
Timestamp,
UserLocalpart,
)
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
@@ -399,96 +398,63 @@ class DelayedEventsHandler:
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at(next_send_ts)
async def cancel(self, requester: Requester, delay_id: str) -> None:
async def cancel(self, request: SynapseRequest, delay_id: str) -> None:
"""
Cancels the scheduled delivery of the matching delayed event.
Args:
request: The request performing this action on the delayed event.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
requester,
(requester.user.to_string(), requester.device_id),
None, request.getClientAddress().host
)
await make_deferred_yieldable(self._initialized_from_db)
next_send_ts = await self._store.cancel_delayed_event(
delay_id=delay_id,
user_localpart=requester.user.localpart,
)
next_send_ts = await self._store.cancel_delayed_event(delay_id)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at_or_none(next_send_ts)
async def restart(self, requester: Requester, delay_id: str) -> None:
async def restart(self, request: SynapseRequest, delay_id: str) -> None:
"""
Restarts the scheduled delivery of the matching delayed event.
Args:
request: The request performing this action on the delayed event.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
requester,
(requester.user.to_string(), requester.device_id),
None, request.getClientAddress().host
)
await make_deferred_yieldable(self._initialized_from_db)
next_send_ts = await self._store.restart_delayed_event(
delay_id=delay_id,
user_localpart=requester.user.localpart,
current_ts=self._get_current_ts(),
delay_id, self._get_current_ts()
)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at(next_send_ts)
async def send(self, requester: Requester, delay_id: str) -> None:
async def send(self, request: SynapseRequest, delay_id: str) -> None:
"""
Immediately sends the matching delayed event, instead of waiting for its scheduled delivery.
Args:
request: The request performing this action on the delayed event.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
# Use standard request limiter for sending delayed events on-demand,
# as an on-demand send is similar to sending a regular event.
await self._request_ratelimiter.ratelimit(requester)
await self._delayed_event_mgmt_ratelimiter.ratelimit(
None, request.getClientAddress().host
)
await make_deferred_yieldable(self._initialized_from_db)
event, next_send_ts = await self._store.process_target_delayed_event(
delay_id=delay_id,
user_localpart=requester.user.localpart,
)
event, next_send_ts = await self._store.process_target_delayed_event(delay_id)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at_or_none(next_send_ts)
await self._send_event(
DelayedEventDetails(
delay_id=DelayID(delay_id),
user_localpart=UserLocalpart(requester.user.localpart),
room_id=event.room_id,
type=event.type,
state_key=event.state_key,
origin_server_ts=event.origin_server_ts,
content=event.content,
device_id=event.device_id,
)
)
await self._send_event(event)
async def _send_on_timeout(self) -> None:
self._next_delayed_event_call = None
@@ -611,9 +577,7 @@ class DelayedEventsHandler:
finally:
# TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure
try:
await self._store.delete_processed_delayed_event(
event.delay_id, event.user_localpart
)
await self._store.delete_processed_delayed_event(event.delay_id)
except Exception:
logger.exception("Failed to delete processed delayed event")

View File

@@ -321,16 +321,7 @@ class DirectoryHandler:
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room Alias is not hosted on this homeserver")
result = await self.get_association_from_room_alias(room_alias)
if result is not None:
return {"room_id": result.room_id, "servers": result.servers}
else:
raise SynapseError(
404,
"Room alias %r not found" % (room_alias.to_string(),),
Codes.NOT_FOUND,
)
return await self.get_association(room_alias)
async def _update_canonical_alias(
self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias

View File

@@ -184,9 +184,7 @@ class WorkerLocksHandler:
locks: Collection[WaitingLock | WaitingMultiLock],
) -> None:
for lock in locks:
deferred = lock.deferred
if not deferred.called:
deferred.callback(None)
lock.release_lock()
self._clock.call_later(
0,
@@ -215,6 +213,12 @@ class WaitingLock:
lambda: start_active_span("WaitingLock.lock")
)
def release_lock(self) -> None:
"""Release the lock (by resolving the deferred)"""
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.callback(None)
async def __aenter__(self) -> None:
self._lock_span.__enter__()
@@ -298,6 +302,12 @@ class WaitingMultiLock:
lambda: start_active_span("WaitingLock.lock")
)
def release_lock(self) -> None:
"""Release the lock (by resolving the deferred)"""
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.callback(None)
async def __aenter__(self) -> None:
self._lock_span.__enter__()

View File

@@ -77,7 +77,11 @@ from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_u
from synapse.http.proxyagent import ProxyAgent
from synapse.http.replicationagent import ReplicationAgent
from synapse.http.types import QueryParams
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.context import (
PreserveLoggingContext,
make_deferred_yieldable,
run_in_background,
)
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import ISynapseReactor, StrSequence
@@ -1036,7 +1040,8 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
Report a max size exceed error and disconnect the first time this is called.
"""
if not self.deferred.called:
self.deferred.errback(BodyExceededMaxSize())
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1135,7 +1140,8 @@ class _MultipartParserProtocol(protocol.Protocol):
logger.warning(
"Exception encountered writing file data to stream: %s", e
)
self.deferred.errback()
with PreserveLoggingContext():
self.deferred.errback()
self.file_length += end - start
callbacks: "multipart.MultipartCallbacks" = {
@@ -1147,7 +1153,8 @@ class _MultipartParserProtocol(protocol.Protocol):
self.total_length += len(incoming_data)
if self.max_length is not None and self.total_length >= self.max_length:
self.deferred.errback(BodyExceededMaxSize())
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1157,7 +1164,8 @@ class _MultipartParserProtocol(protocol.Protocol):
self.parser.write(incoming_data)
except Exception as e:
logger.warning("Exception writing to multipart parser: %s", e)
self.deferred.errback()
with PreserveLoggingContext():
self.deferred.errback()
return
def connectionLost(self, reason: Failure = connectionDone) -> None:
@@ -1167,9 +1175,11 @@ class _MultipartParserProtocol(protocol.Protocol):
if reason.check(ResponseDone):
self.multipart_response.length = self.file_length
self.deferred.callback(self.multipart_response)
with PreserveLoggingContext():
self.deferred.callback(self.multipart_response)
else:
self.deferred.errback(reason)
with PreserveLoggingContext():
self.deferred.errback(reason)
class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
@@ -1193,7 +1203,8 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
try:
self.stream.write(data)
except Exception:
self.deferred.errback()
with PreserveLoggingContext():
self.deferred.errback()
return
self.length += len(data)
@@ -1201,7 +1212,8 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
# connection. dataReceived might be called again if data was received
# in the meantime.
if self.max_size is not None and self.length >= self.max_size:
self.deferred.errback(BodyExceededMaxSize())
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1213,7 +1225,8 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
return
if reason.check(ResponseDone):
self.deferred.callback(self.length)
with PreserveLoggingContext():
self.deferred.callback(self.length)
elif reason.check(PotentialDataLoss):
# This applies to requests which don't set `Content-Length` or a
# `Transfer-Encoding` in the response because in this case the end of the
@@ -1222,9 +1235,11 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
# behavior is expected of some servers (like YouTube), let's ignore it.
# Stolen from https://github.com/twisted/treq/pull/49/files
# http://twistedmatrix.com/trac/ticket/4840
self.deferred.callback(self.length)
with PreserveLoggingContext():
self.deferred.callback(self.length)
else:
self.deferred.errback(reason)
with PreserveLoggingContext():
self.deferred.errback(reason)
def read_body_with_max_size(
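Every callback/errback call site in this file gets the same wrapper, so the idiom is worth spelling out once: these protocols are driven directly by the reactor, and firing a deferred while some logcontext is active would leak that context to whichever coroutine is waiting on the other end. A minimal sketch of the pattern (the finish helper is hypothetical):

from twisted.internet import defer

from synapse.logging.context import PreserveLoggingContext


def finish(deferred: "defer.Deferred[int]", length: int) -> None:
    if not deferred.called:
        # Reset to the sentinel logcontext before firing, so the code
        # awaiting this deferred resumes with its own context rather than
        # inheriting (and then finishing) ours.
        with PreserveLoggingContext():
            deferred.callback(length)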

View File

@@ -41,6 +41,8 @@ from twisted.internet.protocol import ClientFactory, connectionDone
from twisted.python.failure import Failure
from twisted.web import http
from synapse.logging.context import PreserveLoggingContext
logger = logging.getLogger(__name__)
@@ -176,14 +178,16 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None:
logger.debug("Connection to proxy failed: %s", reason)
if not self.on_connection.called:
self.on_connection.errback(reason)
with PreserveLoggingContext():
self.on_connection.errback(reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
logger.debug("Connection to proxy lost: %s", reason)
if not self.on_connection.called:
self.on_connection.errback(reason)
with PreserveLoggingContext():
self.on_connection.errback(reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionLost(connector, reason)
@@ -238,14 +242,16 @@ class HTTPConnectProtocol(protocol.Protocol):
self.http_setup_client.connectionLost(reason)
if not self.connected_deferred.called:
self.connected_deferred.errback(reason)
with PreserveLoggingContext():
self.connected_deferred.errback(reason)
def proxyConnected(self, _: Union[None, "defer.Deferred[None]"]) -> None:
self.wrapped_connection_started = True
assert self.transport is not None
self.wrapped_protocol.makeConnection(self.transport)
self.connected_deferred.callback(self.wrapped_protocol)
with PreserveLoggingContext():
self.connected_deferred.callback(self.wrapped_protocol)
# Get any pending data from the http buf and forward it to the original protocol
buf = self.http_setup_client.clearLineBuffer()
@@ -303,7 +309,8 @@ class HTTPConnectSetupClient(http.HTTPClient):
def handleEndHeaders(self) -> None:
logger.debug("End Headers")
self.on_connected.callback(None)
with PreserveLoggingContext():
self.on_connected.callback(None)
def handleResponse(self, body: bytes) -> None:
pass

View File

@@ -619,19 +619,24 @@ class LoggingContextFilter(logging.Filter):
True to include the record in the log output.
"""
context = current_context()
record.request = self._default_request
# Avoid overwriting an existing `server_name` on the record. This is running in
# the context of a global log record filter so there may be 3rd-party code that
# adds their own `server_name` and we don't want to interfere with that
# (clobber).
if not hasattr(record, "server_name"):
record.server_name = "unknown_server_from_no_logcontext"
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
# type-ignore: `context` should never be `None`, but if it somehow ends up
# being, then we end up in a death spiral of infinite loops, so let's check, for
# robustness' sake.
if context is not None:
#
# Add some default values to avoid log formatting errors.
if context is None:
record.request = self._default_request # type: ignore[unreachable]
# Avoid overwriting an existing `server_name` on the record. This is running in
# the context of a global log record filter so there may be 3rd-party code that
# adds their own `server_name` and we don't want to interfere with that
# (clobber).
if not hasattr(record, "server_name"):
record.server_name = "unknown_server_from_no_logcontext"
# Otherwise, in the normal, expected case, fill in the log record attributes
# from the logcontext.
else:
def safe_set(attr: str, value: Any) -> None:
"""

View File

@@ -45,6 +45,7 @@ from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import (
PreserveLoggingContext,
defer_to_threadpool,
make_deferred_yieldable,
run_in_background,
@@ -753,9 +754,10 @@ class ThreadedFileSender:
self.wakeup_event.set()
if not self.deferred.called:
self.deferred.errback(
ConsumerRequestedStopError("Consumer asked us to stop producing")
)
with PreserveLoggingContext():
self.deferred.errback(
ConsumerRequestedStopError("Consumer asked us to stop producing")
)
async def start_read_loop(self) -> None:
"""This is the loop that drives reading/writing"""
@@ -809,7 +811,8 @@ class ThreadedFileSender:
self.consumer = None
if not self.deferred.called:
self.deferred.errback(failure)
with PreserveLoggingContext():
self.deferred.errback(failure)
def _finish(self) -> None:
"""Called when we have finished writing (either on success or
@@ -823,4 +826,5 @@ class ThreadedFileSender:
self.consumer = None
if not self.deferred.called:
self.deferred.callback(None)
with PreserveLoggingContext():
self.deferred.callback(None)

View File

@@ -47,14 +47,11 @@ class UpdateDelayedEventServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
try:
action = str(body["action"])
@@ -75,11 +72,65 @@ class UpdateDelayedEventServlet(RestServlet):
)
if enum_action == _UpdateDelayedEventAction.CANCEL:
await self.delayed_events_handler.cancel(requester, delay_id)
await self.delayed_events_handler.cancel(request, delay_id)
elif enum_action == _UpdateDelayedEventAction.RESTART:
await self.delayed_events_handler.restart(requester, delay_id)
await self.delayed_events_handler.restart(request, delay_id)
elif enum_action == _UpdateDelayedEventAction.SEND:
await self.delayed_events_handler.send(requester, delay_id)
await self.delayed_events_handler.send(request, delay_id)
return 200, {}
class CancelDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/cancel$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.cancel(request, delay_id)
return 200, {}
class RestartDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/restart$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.restart(request, delay_id)
return 200, {}
class SendDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/send$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.send(request, delay_id)
return 200, {}
@@ -108,4 +159,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
# The following can't currently be instantiated on workers.
if hs.config.worker.worker_app is None:
UpdateDelayedEventServlet(hs).register(http_server)
CancelDelayedEventServlet(hs).register(http_server)
RestartDelayedEventServlet(hs).register(http_server)
SendDelayedEventServlet(hs).register(http_server)
DelayedEventsServlet(hs).register(http_server)
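The three new servlets move the action from the JSON body into the path. A hedged sketch of the two equivalent requests (the delay ID is made up, and per the updated tests below the update endpoints are exercised without an access token):

import requests  # illustrative client, not part of Synapse

BASE = "https://synapse.example.com/_matrix/client/unstable/org.matrix.msc4140"

# Existing form: the action travels in the request body.
requests.post(f"{BASE}/delayed_events/syd_abc", json={"action": "cancel"})

# New form: the action is part of the path, with an empty body.
requests.post(f"{BASE}/delayed_events/syd_abc/cancel", json={})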

View File

@@ -13,18 +13,26 @@
#
import logging
from typing import NewType
from typing import TYPE_CHECKING, NewType
import attr
from synapse.api.errors import NotFoundError
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import LoggingTransaction, StoreError
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
StoreError,
)
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomID
from synapse.util import stringutils
from synapse.util.json import json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -55,6 +63,27 @@ class DelayedEventDetails(EventDetails):
class DelayedEventsStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
# Set delayed events to be uniquely identifiable by their delay_id.
# In practice, delay_ids are already unique because they are generated
# from cryptographically strong random strings.
# Therefore, adding this constraint is not expected to ever fail,
# despite the current pkey technically allowing non-unique delay_ids.
self.db_pool.updates.register_background_index_update(
update_name="delayed_events_idx",
index_name="delayed_events_idx",
table="delayed_events",
columns=("delay_id",),
unique=True,
)
async def get_delayed_events_stream_pos(self) -> int:
"""
Gets the stream position of the background process to watch for state events
@@ -134,9 +163,7 @@ class DelayedEventsStore(SQLBaseStore):
async def restart_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
current_ts: Timestamp,
) -> Timestamp:
"""
@@ -145,7 +172,6 @@ class DelayedEventsStore(SQLBaseStore):
Args:
delay_id: The ID of the delayed event to restart.
user_localpart: The localpart of the delayed event's owner.
current_ts: The current time, which will be used to calculate the new send time.
Returns: The send time of the next delayed event to be sent,
@@ -163,13 +189,11 @@ class DelayedEventsStore(SQLBaseStore):
"""
UPDATE delayed_events
SET send_ts = ? + delay
WHERE delay_id = ? AND user_localpart = ?
AND NOT is_processed
WHERE delay_id = ? AND NOT is_processed
""",
(
current_ts,
delay_id,
user_localpart,
),
)
if txn.rowcount == 0:
@@ -319,21 +343,15 @@ class DelayedEventsStore(SQLBaseStore):
async def process_target_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
) -> tuple[
EventDetails,
DelayedEventDetails,
Timestamp | None,
]:
"""
Marks for processing the matching delayed event, regardless of its timeout time,
as long as it has not already been marked as such.
Args:
delay_id: The ID of the delayed event to process.
user_localpart: The localpart of the delayed event's owner.
Returns: The details of the matching delayed event,
and the send time of the next delayed event to be sent, if any.
@@ -344,39 +362,38 @@ class DelayedEventsStore(SQLBaseStore):
def process_target_delayed_event_txn(
txn: LoggingTransaction,
) -> tuple[
EventDetails,
DelayedEventDetails,
Timestamp | None,
]:
txn.execute(
"""
UPDATE delayed_events
SET is_processed = TRUE
WHERE delay_id = ? AND user_localpart = ?
AND NOT is_processed
WHERE delay_id = ? AND NOT is_processed
RETURNING
room_id,
event_type,
state_key,
origin_server_ts,
content,
device_id
device_id,
user_localpart
""",
(
delay_id,
user_localpart,
),
(delay_id,),
)
row = txn.fetchone()
if row is None:
raise NotFoundError("Delayed event not found")
event = EventDetails(
event = DelayedEventDetails(
RoomID.from_string(row[0]),
EventType(row[1]),
StateKey(row[2]) if row[2] is not None else None,
Timestamp(row[3]) if row[3] is not None else None,
db_to_json(row[4]),
DeviceID(row[5]) if row[5] is not None else None,
DelayID(delay_id),
UserLocalpart(row[6]),
)
return event, self._get_next_delayed_event_send_ts_txn(txn)
@@ -385,19 +402,10 @@ class DelayedEventsStore(SQLBaseStore):
"process_target_delayed_event", process_target_delayed_event_txn
)
async def cancel_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
) -> Timestamp | None:
async def cancel_delayed_event(self, delay_id: str) -> Timestamp | None:
"""
Cancels the matching delayed event, i.e. removes it as long as it hasn't been processed.
Args:
delay_id: The ID of the delayed event to cancel.
user_localpart: The localpart of the delayed event's owner.
Returns: The send time of the next delayed event to be sent, if any.
Raises:
@@ -413,7 +421,6 @@ class DelayedEventsStore(SQLBaseStore):
table="delayed_events",
keyvalues={
"delay_id": delay_id,
"user_localpart": user_localpart,
"is_processed": False,
},
)
@@ -473,11 +480,7 @@ class DelayedEventsStore(SQLBaseStore):
"cancel_delayed_state_events", cancel_delayed_state_events_txn
)
async def delete_processed_delayed_event(
self,
delay_id: DelayID,
user_localpart: UserLocalpart,
) -> None:
async def delete_processed_delayed_event(self, delay_id: DelayID) -> None:
"""
Delete the matching delayed event, as long as it has been marked as processed.
@@ -488,7 +491,6 @@ class DelayedEventsStore(SQLBaseStore):
table="delayed_events",
keyvalues={
"delay_id": delay_id,
"user_localpart": user_localpart,
"is_processed": True,
},
desc="delete_processed_delayed_event",
@@ -554,7 +556,7 @@ def _generate_delay_id() -> DelayID:
# We use the following format for delay IDs:
# syd_<random string>
# They are scoped to user localparts, so it is possible for
# the same ID to exist for multiple users.
# They are not scoped to user localparts, but the random string
# is expected to be sufficiently random to be globally unique.
return DelayID(f"syd_{stringutils.random_string(20)}")
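A back-of-envelope check on that comment, assuming random_string draws from the 52 ASCII letters:

id_space = 52 ** 20            # possible "syd_" suffixes
print(f"{id_space:.2e}")       # ~2.09e+34

# Birthday bound: even after a billion delayed events, the collision
# probability is roughly n**2 / (2 * id_space), i.e. about 2.4e-17.
n = 10 ** 9
print(n ** 2 / (2 * id_space))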

View File

@@ -1600,18 +1600,21 @@ class EventsWorkerStore(SQLBaseStore):
if d:
d.redactions.append(redacter)
# check for MSC4932 redactions
# check for MSC4293 redactions
to_check = []
events: list[_EventRow] = []
for e in evs:
event = event_dict.get(e)
if not event:
continue
events.append(event)
event_json = json.loads(event.json)
room_id = event_json.get("room_id")
user_id = event_json.get("sender")
to_check.append((room_id, user_id))
try:
event = event_dict.get(e)
if not event:
continue
events.append(event)
event_json = json.loads(event.json)
room_id = event_json.get("room_id")
user_id = event_json.get("sender")
to_check.append((room_id, user_id))
except Exception as exc:
raise InvalidEventError(f"Invalid event {event_id}") from exc
# likely that some of these events may be for the same room/user combo, in
# which case we don't need to do redundant queries

View File

@@ -239,6 +239,16 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
txn.execute("SELECT event_id, should_delete FROM events_to_purge")
event_rows = txn.fetchall()
if len(event_rows) == 0:
logger.info("[purge] no events found to purge")
# For the sake of cleanliness: drop the temp table.
# This will commit the txn in sqlite, so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
# no referenced state groups
return set()
logger.info(
"[purge] found %i events before cutoff, of which %i can be deleted",
len(event_rows),

View File

@@ -99,8 +99,8 @@ class PostgresEngine(
allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
# Are we on a supported PostgreSQL version?
if not allow_outdated_version and self._version < 130000:
raise RuntimeError("Synapse requires PostgreSQL 13 or above.")
if not allow_outdated_version and self._version < 140000:
raise RuntimeError("Synapse requires PostgreSQL 14 or above.")
with db_conn.cursor() as txn:
txn.execute("SHOW SERVER_ENCODING")

View File

@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 92 # remember to update the list below when updating
SCHEMA_VERSION = 93 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -168,11 +168,15 @@ Changes in SCHEMA_VERSION = 91
Changes in SCHEMA_VERSION = 92
- Cleaned up a trigger that was added in #18260 and then reverted.
Changes in SCHEMA_VERSION = 93
- MSC4140: Set delayed events to be uniquely identifiable by their delay ID.
"""
SCHEMA_COMPAT_VERSION = (
# Transitive links are no longer written to `event_auth_chain_links`
# TODO: On the next compat bump, update the primary key of `delayed_events`
84
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat

View File

@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 Element Creations, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9301, 'delayed_events_idx', '{}');

View File

@@ -813,7 +813,8 @@ def timeout_deferred(
# will have errbacked new_d, but in case it hasn't, errback it now.
if not new_d.called:
new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
with PreserveLoggingContext():
new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
# We don't track these calls since they are short.
delayed_call = clock.call_later(
@@ -840,11 +841,13 @@ def timeout_deferred(
def success_cb(val: _T) -> None:
if not new_d.called:
new_d.callback(val)
with PreserveLoggingContext():
new_d.callback(val)
def failure_cb(val: Failure) -> None:
if not new_d.called:
new_d.errback(val)
with PreserveLoggingContext():
new_d.errback(val)
deferred.addCallbacks(success_cb, failure_cb)
@@ -946,7 +949,8 @@ def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
# propagating. we then `unpause` it once the wrapped deferred completes, to
# propagate the exception.
new_deferred.pause()
new_deferred.errback(Failure(CancelledError()))
with PreserveLoggingContext():
new_deferred.errback(Failure(CancelledError()))
deferred.addBoth(lambda _: new_deferred.unpause())
@@ -978,15 +982,6 @@ class AwakenableSleeper:
"""Sleep for the given number of milliseconds, or return if the given
`name` is explicitly woken up.
"""
# Create a deferred that gets called in N seconds
sleep_deferred: "defer.Deferred[None]" = defer.Deferred()
call = self._clock.call_later(
delay_ms / 1000,
sleep_deferred.callback,
None,
)
# Create a deferred that will get called if `wake` is called with
# the same `name`.
stream_set = self._streams.setdefault(name, set())
@@ -996,13 +991,14 @@ class AwakenableSleeper:
try:
# Wait for either the delay or for `wake` to be called.
await make_deferred_yieldable(
defer.DeferredList(
[sleep_deferred, notify_deferred],
fireOnOneCallback=True,
fireOnOneErrback=True,
consumeErrors=True,
timeout_deferred(
deferred=stop_cancellation(notify_deferred),
timeout=delay_ms / 1000,
clock=self._clock,
)
)
except defer.TimeoutError:
pass
finally:
# Clean up the state
curr_stream_set = self._streams.get(name)
@@ -1011,10 +1007,6 @@ class AwakenableSleeper:
if len(curr_stream_set) == 0:
self._streams.pop(name)
# Cancel the sleep if we were woken up
if call.active():
call.cancel()
class DeferredEvent:
"""Like threading.Event but for async code"""

View File

@@ -39,6 +39,7 @@ from prometheus_client import Gauge
from twisted.internet import defer
from twisted.python.failure import Failure
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics import SERVER_NAME_LABEL
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.lrucache import LruCache
@@ -514,7 +515,8 @@ class CacheMultipleEntries(CacheEntry[KT, VT]):
cache._completed_callback(value, self, key)
if self._deferred:
self._deferred.callback(result)
with PreserveLoggingContext():
self._deferred.callback(result)
def error_bulk(
self, cache: DeferredCache[KT, VT], keys: Collection[KT], failure: Failure
@@ -524,4 +526,5 @@ class CacheMultipleEntries(CacheEntry[KT, VT]):
cache._error_callback(failure, self, key)
if self._deferred:
self._deferred.errback(failure)
with PreserveLoggingContext():
self._deferred.errback(failure)

View File

@@ -14,6 +14,7 @@
#
import logging
from typing import (
Any,
Callable,
@@ -30,10 +31,14 @@ from twisted.internet.task import LoopingCall
from synapse.logging import context
from synapse.types import ISynapseThreadlessReactor
from synapse.util import log_failure
from synapse.util.stringutils import random_string_insecure_fast
P = ParamSpec("P")
logger = logging.getLogger(__name__)
class Clock:
"""
A Clock wraps a Twisted reactor and provides utilities on top of it.
@@ -64,7 +69,12 @@ class Clock:
"""List of active looping calls"""
self._call_id_to_delayed_call: dict[int, IDelayedCall] = {}
"""Mapping from unique call ID to delayed call"""
"""
Mapping from unique call ID to delayed call.
For "performance", this only tracks a subset of delayed calls: those created
with `call_later` with `call_later_cancel_on_shutdown=True`.
"""
self._is_shutdown = False
"""Whether shutdown has been requested by the HomeServer"""
@@ -153,11 +163,20 @@ class Clock:
**kwargs: P.kwargs,
) -> LoopingCall:
"""Common functionality for `looping_call` and `looping_call_now`"""
instance_id = random_string_insecure_fast(5)
if self._is_shutdown:
raise Exception("Cannot start looping call. Clock has been shutdown")
looping_call_context_string = "looping_call"
if now:
looping_call_context_string = "looping_call_now"
def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred:
logger.debug(
"%s(%s): Executing callback", looping_call_context_string, instance_id
)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `looping_call` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
@@ -201,6 +220,17 @@ class Clock:
d = call.start(msec / 1000.0, now=now)
d.addErrback(log_failure, "Looping call died", consumeErrors=False)
self._looping_calls.append(call)
logger.debug(
"%s(%s): Scheduled looping call every %sms later",
looping_call_context_string,
instance_id,
msec,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
return call
def cancel_all_looping_calls(self, consumeErrors: bool = True) -> None:
@@ -226,7 +256,7 @@ class Clock:
*args: Any,
call_later_cancel_on_shutdown: bool = True,
**kwargs: Any,
) -> IDelayedCall:
) -> "DelayedCallWrapper":
"""Call something later
Note that the function will be called with generic `call_later` logcontext, so
@@ -245,74 +275,79 @@ class Clock:
issue, we can just track all delayed calls.
**kwargs: Key arguments to pass to function.
"""
call_id = self._delayed_call_id
self._delayed_call_id = self._delayed_call_id + 1
if self._is_shutdown:
raise Exception("Cannot start delayed call. Clock has been shutdown")
def create_wrapped_callback(
track_for_shutdown_cancellation: bool,
) -> Callable[P, None]:
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `call_later` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
"leaked their logcontext to us."
)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug("call_later(%s): Executing callback", call_id)
# Because this is a callback from the reactor, we will be using the
# `sentinel` log context at this point. We want the function to log with
# some logcontext as we want to know which server the logs came from.
#
# We use `PreserveLoggingContext` to prevent our new `call_later`
# logcontext from finishing as soon as we exit this function, in case `f`
# returns an awaitable/deferred which would continue running and may try to
# restore the `call_later` context when it's done (because it's trying to
# adhere to the Synapse logcontext rules.)
#
# This also ensures that we return to the `sentinel` context when we exit
# this function and yield control back to the reactor to avoid leaking the
# current logcontext to the reactor (which would then get picked up and
# associated with the next thing the reactor does)
try:
with context.PreserveLoggingContext(
context.LoggingContext(
name="call_later", server_name=self._server_name
)
):
# We use `run_in_background` to reset the logcontext after `f` (or the
# awaitable returned by `f`) completes to avoid leaking the current
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
finally:
if track_for_shutdown_cancellation:
# We still want to remove the call from the tracking map. Even if
# the callback raises an exception.
self._call_id_to_delayed_call.pop(call_id)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `call_later` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
"leaked their logcontext to us."
)
return wrapped_callback
# Because this is a callback from the reactor, we will be using the
# `sentinel` log context at this point. We want the function to log with
# some logcontext as we want to know which server the logs came from.
#
# We use `PreserveLoggingContext` to prevent our new `call_later`
# logcontext from finishing as soon as we exit this function, in case `f`
# returns an awaitable/deferred which would continue running and may try to
# restore the `call_later` context when it's done (because it's trying to
# adhere to the Synapse logcontext rules.)
#
# This also ensures that we return to the `sentinel` context when we exit
# this function and yield control back to the reactor to avoid leaking the
# current logcontext to the reactor (which would then get picked up and
# associated with the next thing the reactor does)
try:
with context.PreserveLoggingContext(
context.LoggingContext(
name="call_later", server_name=self._server_name
)
):
# We use `run_in_background` to reset the logcontext after `f` (or the
# awaitable returned by `f`) completes to avoid leaking the current
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
finally:
if call_later_cancel_on_shutdown:
# We still want to remove the call from the tracking map. Even if
# the callback raises an exception.
self._call_id_to_delayed_call.pop(call_id)
# We can ignore the lint here since this class is the one location callLater should
# be called.
call = self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) # type: ignore[call-later-not-tracked]
logger.debug(
"call_later(%s): Scheduled call for %ss later (tracked for shutdown: %s)",
call_id,
delay,
call_later_cancel_on_shutdown,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
wrapped_call = DelayedCallWrapper(call, call_id, self)
if call_later_cancel_on_shutdown:
call_id = self._delayed_call_id
self._delayed_call_id = self._delayed_call_id + 1
self._call_id_to_delayed_call[call_id] = wrapped_call
# We can ignore the lint here since this class is the one location callLater
# should be called.
call = self._reactor.callLater(
delay, create_wrapped_callback(True), *args, **kwargs
) # type: ignore[call-later-not-tracked]
call = DelayedCallWrapper(call, call_id, self)
self._call_id_to_delayed_call[call_id] = call
return call
else:
# We can ignore the lint here since this class is the one location callLater should
# be called.
return self._reactor.callLater(
delay, create_wrapped_callback(False), *args, **kwargs
) # type: ignore[call-later-not-tracked]
return wrapped_call
def cancel_call_later(self, timer: IDelayedCall, ignore_errs: bool = False) -> None:
def cancel_call_later(
self, wrapped_call: "DelayedCallWrapper", ignore_errs: bool = False
) -> None:
try:
timer.cancel()
logger.debug(
"cancel_call_later: cancelling scheduled call %s", wrapped_call.call_id
)
wrapped_call.delayed_call.cancel()
except Exception:
if not ignore_errs:
raise
@@ -327,8 +362,11 @@ class Clock:
"""
# We make a copy here since calling `cancel()` on a delayed_call
# will result in the call removing itself from the map mid-iteration.
for call in list(self._call_id_to_delayed_call.values()):
for call_id, call in list(self._call_id_to_delayed_call.items()):
try:
logger.debug(
"cancel_all_delayed_calls: cancelling scheduled call %s", call_id
)
call.cancel()
except Exception:
if not ignore_errs:
@@ -352,8 +390,11 @@ class Clock:
*args: Positional arguments to pass to function.
**kwargs: Key arguments to pass to function.
"""
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug("call_when_running(%s): Executing callback", instance_id)
# Since this callback can be invoked immediately if the reactor is already
# running, we can't always assume that we're running in the sentinel
# logcontext (i.e. we can't assert that we're in the sentinel context like
@@ -392,6 +433,14 @@ class Clock:
# callWhenRunning should be called.
self._reactor.callWhenRunning(wrapped_callback, *args, **kwargs) # type: ignore[prefer-synapse-clock-call-when-running]
logger.debug(
"call_when_running(%s): Scheduled call",
instance_id,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
def add_system_event_trigger(
self,
phase: str,
@@ -417,8 +466,16 @@ class Clock:
Returns:
an ID that can be used to remove this call with `reactor.removeSystemEventTrigger`.
"""
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug(
"add_system_event_trigger(%s): Executing %s %s callback",
instance_id,
phase,
event_type,
)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `add_system_event_trigger` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
@@ -449,6 +506,16 @@ class Clock:
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
logger.debug(
"add_system_event_trigger(%s) for %s %s",
instance_id,
phase,
event_type,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
# We can ignore the lint here since this class is the one location
# `addSystemEventTrigger` should be called.
return self._reactor.addSystemEventTrigger(

View File

@@ -1057,6 +1057,32 @@ class MasAuthDelegation(HomeserverTestCase):
self.assertEqual(self.server.calls, 1)
class MasAuthDelegationWithSubpath(MasAuthDelegation):
"""Test MAS delegation when the MAS server is hosted on a subpath."""
def default_config(self) -> dict[str, Any]:
config = super().default_config()
# Override the endpoint to include a subpath
config["matrix_authentication_service"]["endpoint"] = (
self.server.endpoint + "auth/path/"
)
return config
def test_introspection_endpoint_uses_subpath(self) -> None:
"""Test that the introspection endpoint correctly uses the configured subpath."""
expected_introspection_url = (
self.server.endpoint + "auth/path/oauth2/introspect"
)
self.assertEqual(self._auth._introspection_endpoint, expected_introspection_url)
def test_metadata_url_uses_subpath(self) -> None:
"""Test that the metadata URL correctly uses the configured subpath."""
expected_metadata_url = (
self.server.endpoint + "auth/path/.well-known/openid-configuration"
)
self.assertEqual(self._auth._metadata_url, expected_metadata_url)
@parameterized_class(
("config",),
[

View File

@@ -28,6 +28,7 @@ from synapse.types import JsonDict
from synapse.util.clock import Clock
from tests import unittest
from tests.server import FakeChannel
from tests.unittest import HomeserverTestCase
PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events"
@@ -127,6 +128,10 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
def test_get_delayed_events_auth(self) -> None:
channel = self.make_request("GET", PATH_PREFIX)
self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, channel.result)
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
@@ -154,7 +159,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/",
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
@@ -162,7 +166,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/abc",
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -175,7 +178,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
"POST",
f"{PATH_PREFIX}/abc",
{},
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -188,7 +190,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
"POST",
f"{PATH_PREFIX}/abc",
{"action": "oops"},
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -196,17 +197,21 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel.json_body["errcode"],
)
@parameterized.expand(["cancel", "restart", "send"])
def test_update_delayed_event_without_match(self, action: str) -> None:
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/abc",
{"action": action},
self.user1_access_token,
@parameterized.expand(
(
(action, action_in_path)
for action in ("cancel", "restart", "send")
for action_in_path in (True, False)
)
)
def test_update_delayed_event_without_match(
self, action: str, action_in_path: bool
) -> None:
channel = self._update_delayed_event("abc", action, action_in_path)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
@parameterized.expand((True, False))
def test_cancel_delayed_state_event(self, action_in_path: bool) -> None:
state_key = "to_never_send"
setter_key = "setter"
@@ -221,7 +226,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -236,12 +241,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "cancel", action_in_path)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertListEqual([], self._get_delayed_events())
@@ -254,10 +254,11 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
def test_cancel_delayed_event_ratelimit(self, action_in_path: bool) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -268,38 +269,17 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
delay_ids.append(delay_id)
channel = self._update_delayed_event(delay_ids.pop(0), "cancel", action_in_path)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(delay_ids.pop(0), "cancel", action_in_path)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
@parameterized.expand((True, False))
def test_send_delayed_state_event(self, action_in_path: bool) -> None:
state_key = "to_send_on_request"
setter_key = "setter"
@@ -314,7 +294,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -329,12 +309,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "send", action_in_path)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertListEqual([], self._get_delayed_events())
content = self.helper.get_state(
@@ -345,8 +320,9 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
@unittest.override_config({"rc_message": {"per_second": 3.5, "burst_count": 4}})
def test_send_delayed_event_ratelimit(self) -> None:
@parameterized.expand((True, False))
@unittest.override_config({"rc_message": {"per_second": 2.5, "burst_count": 3}})
def test_send_delayed_event_ratelimit(self, action_in_path: bool) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -357,38 +333,17 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
delay_ids.append(delay_id)
channel = self._update_delayed_event(delay_ids.pop(0), "send", action_in_path)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(delay_ids.pop(0), "send", action_in_path)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
@parameterized.expand((True, False))
def test_restart_delayed_state_event(self, action_in_path: bool) -> None:
state_key = "to_send_on_restarted_timeout"
setter_key = "setter"
@@ -403,7 +358,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -418,12 +373,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "restart", action_in_path)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.reactor.advance(1)
@@ -449,10 +399,11 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
def test_restart_delayed_event_ratelimit(self, action_in_path: bool) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -463,37 +414,19 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
delay_ids.append(delay_id)
channel = self._update_delayed_event(
delay_ids.pop(0), "restart", action_in_path
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(
delay_ids.pop(0), "restart", action_in_path
)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
def test_delayed_state_is_not_cancelled_by_new_state_from_same_user(
self,
) -> None:
@@ -598,6 +531,17 @@ class DelayedEventsTestCase(HomeserverTestCase):
return content
def _update_delayed_event(
self, delay_id: str, action: str, action_in_path: bool
) -> FakeChannel:
path = f"{PATH_PREFIX}/{delay_id}"
body = {}
if action_in_path:
path += f"/{action}"
else:
body["action"] = action
return self.make_request("POST", path, body)
def _get_path_for_delayed_state(
room_id: str, event_type: str, state_key: str, delay_ms: int