Compare commits


35 Commits

SHA1        Author        Date                        Message
d65fc3861b  Devon Hudson  2025-11-09 20:54:21 -07:00  Fix lint
eff2503adc  Devon Hudson  2025-11-09 20:52:27 -07:00  Move thread_update request body next to handler
ba3c8a5f3e  Devon Hudson  2025-11-09 20:45:32 -07:00  Refactor thread updates to use the same logic between endpoint and extension
c826157c52  Devon Hudson  2025-11-09 16:21:56 -07:00  Fix linter errors
57c884ec83  Devon Hudson  2025-11-09 16:15:20 -07:00  Merge branch 'devon/ssext_threads' into devon/ssext_threads_companion
6fa43cb0b4  Devon Hudson  2025-11-09 12:43:41 -07:00  Comment cleanup
f778ac32c1  Devon Hudson  2025-11-09 12:37:04 -07:00  Update docstring
003fc725db  Devon Hudson  2025-11-09 12:33:55 -07:00  Merge branch 'develop' into devon/ssext_threads
934f99a694  Devon Hudson  2025-11-09 12:09:56 -07:00  Add wait_for_new_data tests
78e8ec6161  Devon Hudson  2025-11-09 09:44:52 -07:00  Add test for room list filtering
f59419377d  Devon Hudson  2025-11-09 09:35:11 -07:00  Refactor for clarity
a3b34dfafd  Devon Hudson  2025-11-09 09:30:44 -07:00  Run linter
cb82a4a687  Devon Hudson  2025-11-09 08:45:52 -07:00  Handle user leave/ban rooms to prevent leaking data
0c0ece9612  Devon Hudson  2025-11-09 08:29:49 -07:00  Fix next_token logic
46e3f6756c  Devon Hudson  2025-11-08 10:07:46 -07:00  Cleanup logic
dedd6e35e6  Devon Hudson  2025-11-08 09:12:37 -07:00  Rejig thread updates to use room lists
a3c7b3ecb9  Devon Hudson  2025-10-16 18:06:26 -06:00  Don't fetch bundled aggregations if we don't have to
bf594a28a8  Devon Hudson  2025-10-16 17:37:01 -06:00  Move constants to designated file
89f75cc70f  Devon Hudson  2025-10-10 16:20:03 -06:00  Add newsfile
2f8568866e  Devon Hudson  2025-10-10 16:15:21 -06:00  Remove unnecessary bits
af992dd0e2  Devon Hudson  2025-10-10 15:40:06 -06:00  Merge branch 'devon/ssext_threads' into devon/ssext_threads_companion
c757969597  Devon Hudson  2025-10-10 15:39:27 -06:00  Add indexes to improve threads query performance
87e9fe8b38  Devon Hudson  2025-10-10 15:09:01 -06:00  Add implementation for /thread_updates MSC4360 companion endpoint
4cb0eeabdf  Devon Hudson  2025-10-09 11:28:33 -06:00  Allow SlidingSyncStreamToken in /relations
4d7826b006  Devon Hudson  2025-10-08 17:01:40 -06:00  Filter events from extension if in timeline
ab7e5a2b17  Devon Hudson  2025-10-08 16:12:46 -06:00  Properly return prev_batch tokens for threads extension
4c51247cb3  Devon Hudson  2025-10-07 12:49:32 -06:00  Only return rooms where user is currently joined
4dd82e581a  Devon Hudson  2025-10-03 16:16:04 -06:00  Add newsfile
6e69338abc  Devon Hudson  2025-10-03 16:15:11 -06:00  Fix linter error
79ea4bed33  Devon Hudson  2025-10-03 15:57:13 -06:00  Add thread_root events to threads extension response
9ef4ca173e  Devon Hudson  2025-10-03 14:01:16 -06:00  Add user room filtering for threads extension
24b38733df  Devon Hudson  2025-10-02 17:23:30 -06:00  Don't return empty fields in response
4602b56643  Devon Hudson  2025-10-02 17:11:14 -06:00  Stub in early db queries to get tests going
6c460b3eae  Devon Hudson  2025-10-01 10:53:11 -06:00  Stub in threads extension tests
cd4f4223de  Devon Hudson  2025-10-01 10:04:29 -06:00  Stub in threads sliding sync extension
69 changed files with 4822 additions and 1604 deletions


@@ -35,55 +35,46 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest and newest Python
# version that's supported. The oldest version ensures we don't accidentally
# introduce syntax or code that's too new, and the newest ensures we don't use
# code that's been dropped in the latest supported Python version.
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.10 right now)
trial_sqlite_tests = [
{
"python-version": "3.10",
"database": "sqlite",
"extras": "all",
},
{
"python-version": "3.14",
"database": "sqlite",
"extras": "all",
},
}
]
if not IS_PR:
# Otherwise, check all supported Python versions.
#
# Avoiding running all of these versions on every PR saves on CI time.
trial_sqlite_tests.extend(
{
"python-version": version,
"database": "sqlite",
"extras": "all",
}
for version in ("3.11", "3.12", "3.13")
for version in ("3.11", "3.12", "3.13", "3.14")
)
# Only test postgres against the earliest and latest Python versions that we
# support in order to save on CI time.
trial_postgres_tests = [
{
"python-version": "3.10",
"database": "postgres",
"postgres-version": "14",
"postgres-version": "13",
"extras": "all",
},
{
"python-version": "3.14",
"database": "postgres",
"postgres-version": "17",
"extras": "all",
},
}
]
# Ensure that Synapse passes unit tests even with no extra dependencies installed.
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.14",
"database": "postgres",
"postgres-version": "17",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.10",


@@ -16,23 +16,20 @@ export VIRTUALENV_NO_DOWNLOAD=1
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - `-E` use extended regex syntax.
# - Don't modify the line that defines required Python versions.
# - Replace all lower and tilde bounds with exact bounds.
# - Replace all caret bounds with exact bounds.
# - Delete all lines referring to psycopg2 - so no testing of postgres support.
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
sed -i -E '
/^\s*requires-python\s*=/b
s/[~>]=/==/g
s/\^/==/g
/psycopg2/d
s/pyOpenSSL\s*==\s*16\.0\.0"/pyOpenSSL==17.0.0"/
/systemd/d
' pyproject.toml
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
echo "::group::Patched pyproject.toml"
cat pyproject.toml
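
As a rough illustration of what the bound-pinning substitutions do, here is a Python re-implementation of the two core rules applied to a single hypothetical dependency line (illustrative only, not part of the script):

```python
import re

line = 'attrs = ">=23.1.0"'  # a hypothetical pyproject.toml dependency line

line = re.sub(r"[~>]=", "==", line)  # lower (>=) and tilde (~=) bounds -> exact pins
line = re.sub(r"\^", "==", line)     # caret (^) bounds -> exact pins

print(line)  # attrs = "==23.1.0"
```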


@@ -26,8 +26,3 @@ c4268e3da64f1abb5b31deaeb5769adb6510c0a7
# Update black to 23.1.0 (https://github.com/matrix-org/synapse/pull/15103)
9bb2eac71962970d02842bca441f4bcdbbf93a11
# Use type hinting generics in standard collections (https://github.com/element-hq/synapse/pull/19046)
fc244bb592aa481faf28214a2e2ce3bb4e95d990
# Write union types as X | Y where possible (https://github.com/element-hq/synapse/pull/19111)
fcac7e0282b074d4bd3414d1c9c181e9701875d9


@@ -123,7 +123,7 @@ jobs:
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Calculate docker image tag
uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
with:
images: ${{ matrix.repository }}
flavor: |


@@ -55,7 +55,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |


@@ -150,14 +150,12 @@ jobs:
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# The platforms that we build for are determined by the
# `tool.cibuildwheel.skip` option in `pyproject.toml`.
# We skip testing wheels for the following platforms in CI:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
#
# pp3*-* (PyPy wheels) broke in CI (TODO: investigate).
# musl: (TODO: investigate).
CIBW_TEST_SKIP: pp3*-* *musl*
# cp39-*: Python 3.9 is EOL.
# cp3??t-*: Free-threaded builds are not currently supported.
CIBW_TEST_SKIP: pp3*-* cp39-* cp3??t-* *i686* *musl*
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
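
The `CIBW_TEST_SKIP` entries are glob-style patterns matched against cibuildwheel build identifiers. A rough illustration using Python's fnmatch, which approximates (but is not exactly) cibuildwheel's matcher:

```python
from fnmatch import fnmatch

skip_patterns = ["pp3*-*", "cp39-*", "cp3??t-*", "*i686*", "*musl*"]

# Sample build identifiers: regular CPython, PyPy, and a free-threaded build.
for build_id in ("cp310-manylinux_x86_64", "pp310-manylinux_x86_64", "cp313t-win_amd64"):
    skipped = any(fnmatch(build_id, pattern) for pattern in skip_patterns)
    print(build_id, "-> tests skipped" if skipped else "-> tests run")
```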


@@ -617,7 +617,7 @@ jobs:
matrix:
include:
- python-version: "3.10"
postgres-version: "14"
postgres-version: "13"
- python-version: "3.14"
postgres-version: "17"


@@ -1,84 +1,4 @@
# Synapse 1.143.0rc2 (2025-11-18)
## Internal Changes
- Fixes docker image creation in the release workflow.
# Synapse 1.143.0rc1 (2025-11-18)
## Dropping support for PostgreSQL 13
In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.
## Features
- Support multiple config files in `register_new_matrix_user`. ([\#18784](https://github.com/element-hq/synapse/issues/18784))
- Remove authentication from `POST /_matrix/client/v1/delayed_events`, and allow calling this endpoint with the update action to take (`send`/`cancel`/`restart`) in the request path instead of the body. ([\#19152](https://github.com/element-hq/synapse/issues/19152))
## Bugfixes
- Fixed a longstanding bug where background updates were only run on the `main` database. ([\#19181](https://github.com/element-hq/synapse/issues/19181))
- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))
- Fix the SQLite-to-PostgreSQL migration script to correctly migrate a boolean column in the `delayed_events` table. ([\#19155](https://github.com/element-hq/synapse/issues/19155))
## Improved Documentation
- Improve documentation around streams, particularly ID generators and adding new streams. ([\#18943](https://github.com/element-hq/synapse/issues/18943))
## Deprecations and Removals
- Remove support for PostgreSQL 13. ([\#19170](https://github.com/element-hq/synapse/issues/19170))
## Internal Changes
- Provide additional servers with federation room directory results. ([\#18970](https://github.com/element-hq/synapse/issues/18970))
- Add a shortcut return when there are no events to purge. ([\#19093](https://github.com/element-hq/synapse/issues/19093))
- Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10. ([\#19111](https://github.com/element-hq/synapse/issues/19111))
- Reduce cardinality of `synapse_storage_events_persisted_events_sep_total` metric by removing `origin_entity` label. This also separates out events sent by local application services by changing the `origin_type` for such events to `application_service`. The `type` field also only tracks common event types, and anything else is bucketed under `*other*`. ([\#19133](https://github.com/element-hq/synapse/issues/19133), [\#19168](https://github.com/element-hq/synapse/issues/19168))
- Run trial tests on Python 3.14 for PRs. ([\#19135](https://github.com/element-hq/synapse/issues/19135))
- Update `pyproject.toml` project metadata to be compatible with standard Python packaging tooling. ([\#19137](https://github.com/element-hq/synapse/issues/19137))
- Minor speed up of processing of inbound replication. ([\#19138](https://github.com/element-hq/synapse/issues/19138), [\#19145](https://github.com/element-hq/synapse/issues/19145), [\#19146](https://github.com/element-hq/synapse/issues/19146))
- Ignore recent Python language refactors from git blame (`.git-blame-ignore-revs`). ([\#19150](https://github.com/element-hq/synapse/issues/19150))
- Bump lower bounds of dependencies `parameterized` to `0.9.0` and `idna` to `3.3` as those are the first to advertise support for Python 3.10. ([\#19167](https://github.com/element-hq/synapse/issues/19167))
- Point out which event caused the exception when checking [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) redactions. ([\#19169](https://github.com/element-hq/synapse/issues/19169))
- Restore printing `sentinel` for the log record `request` when no logcontext is active. ([\#19172](https://github.com/element-hq/synapse/issues/19172))
- Add debug logs to track `Clock` utilities. ([\#19173](https://github.com/element-hq/synapse/issues/19173))
- Remove explicit python version skips in `cibuildwheel` config as it's no longer required after [#19137](https://github.com/element-hq/synapse/pull/19137). ([\#19177](https://github.com/element-hq/synapse/issues/19177))
- Fix potential lost logcontext when `PerDestinationQueue.shutdown(...)` is called. ([\#19178](https://github.com/element-hq/synapse/issues/19178))
- Fix bad deferred logcontext handling across the codebase. ([\#19180](https://github.com/element-hq/synapse/issues/19180))
### Updates to locked dependencies
* Bump bytes from 1.10.1 to 1.11.0. ([\#19193](https://github.com/element-hq/synapse/issues/19193))
* Bump click from 8.1.8 to 8.3.1. ([\#19195](https://github.com/element-hq/synapse/issues/19195))
* Bump cryptography from 43.0.3 to 45.0.7. ([\#19159](https://github.com/element-hq/synapse/issues/19159))
* Bump docker/metadata-action from 5.8.0 to 5.9.0. ([\#19161](https://github.com/element-hq/synapse/issues/19161))
* Bump pydantic from 2.12.3 to 2.12.4. ([\#19158](https://github.com/element-hq/synapse/issues/19158))
* Bump pyo3-log from 0.13.1 to 0.13.2. ([\#19156](https://github.com/element-hq/synapse/issues/19156))
* Bump ruff from 0.14.3 to 0.14.5. ([\#19196](https://github.com/element-hq/synapse/issues/19196))
* Bump sentry-sdk from 2.34.1 to 2.43.0. ([\#19157](https://github.com/element-hq/synapse/issues/19157))
* Bump sentry-sdk from 2.43.0 to 2.44.0. ([\#19197](https://github.com/element-hq/synapse/issues/19197))
* Bump tomli from 2.2.1 to 2.3.0. ([\#19194](https://github.com/element-hq/synapse/issues/19194))
* Bump types-netaddr from 1.3.0.20240530 to 1.3.0.20251108. ([\#19160](https://github.com/element-hq/synapse/issues/19160))
# Synapse 1.142.1 (2025-11-18)
## Bugfixes
- Fixed a bug introduced in v1.142.0 preventing subpaths in MAS endpoints from working. ([\#19186](https://github.com/element-hq/synapse/issues/19186))
# Synapse 1.142.0 (2025-11-11)
# Synapse 1.142.0rc4 (2025-11-07)
## Dropped support for Python 3.9
@@ -109,15 +29,6 @@ of these wheels downstream, please reach out to us in
[#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org). We'd
love to hear from you!
## Internal Changes
- Properly stop building wheels for Python 3.9 and free-threaded CPython. ([\#19154](https://github.com/element-hq/synapse/issues/19154))
# Synapse 1.142.0rc4 (2025-11-07)
## Bugfixes
- Fix a bug introduced in 1.142.0rc1 where any attempt to configure `matrix_authentication_service.secret_path` would prevent the homeserver from starting up. ([\#19144](https://github.com/element-hq/synapse/issues/19144))

Cargo.lock (generated, 8 lines changed)

@@ -73,9 +73,9 @@ checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
[[package]]
name = "bytes"
version = "1.11.0"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "cc"
@@ -851,9 +851,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.13.2"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f8bae9ad5ba08b0b0ed2bb9c2bdbaeccc69cafca96d78cf0fbcea0d45d122bb"
checksum = "d359e20231345f21a3b5b6aea7e73f4dc97e1712ef3bfe2d88997ac6a308d784"
dependencies = [
"arc-swap",
"log",


@@ -0,0 +1 @@
Add experimental support for MSC4360: Sliding Sync Threads Extension.


@@ -0,0 +1 @@
Add companion endpoint for MSC4360: Sliding Sync Threads Extension.
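
For context, sliding sync extensions are enabled per request. A hypothetical sketch of toggling the threads extension, following the shape of existing sliding sync extensions; the authoritative field names are defined by MSC4360 and may differ:

```python
# Hypothetical request body; "threads"/"enabled" follow the pattern of other
# sliding sync extensions and are not confirmed against MSC4360.
sliding_sync_request_body = {
    "lists": {},
    "extensions": {
        "threads": {
            "enabled": True,
        },
    },
}
```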

changelog.d/19111.misc (new file, 1 line)

@@ -0,0 +1 @@
Write union types as `X | Y` where possible, as per PEP 604, added in Python 3.10.

changelog.d/19138.misc (new file, 1 line)

@@ -0,0 +1 @@
Minor speed up of processing of inbound replication.

changelog.d/19145.misc (new file, 1 line)

@@ -0,0 +1 @@
Minor speed up of processing of inbound replication.

changelog.d/19146.misc (new file, 1 line)

@@ -0,0 +1 @@
Minor speed up of processing of inbound replication.


@@ -1 +0,0 @@
Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable).


@@ -2166,10 +2166,10 @@
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"expr": "rate(synapse_storage_events_persisted_events_sep_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{origin_type}}",
"legendFormat": "{{type}}",
"refId": "D"
}
],
@@ -2254,7 +2254,7 @@
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum by(type) (rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"instant": false,
"intervalFactor": 2,
@@ -2294,6 +2294,99 @@
"align": false
}
},
{
"aliasColors": {
"irc-freenode (local)": "#EAB839"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"decimals": 1,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 44
},
"hiddenSeries": false,
"id": 44,
"legend": {
"alignAsTable": true,
"avg": false,
"current": false,
"hideEmpty": true,
"hideZero": true,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{origin_entity}} ({{origin_type}})",
"refId": "A",
"step": 20
}
],
"thresholds": [],
"timeRegions": [],
"title": "Events/s by Origin",
"tooltip": {
"shared": false,
"sort": 2,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"mode": "time",
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
"logBase": 1,
"min": "0",
"show": true
},
{
"format": "short",
"logBase": 1,
"show": true
}
],
"yaxis": {
"align": false
}
},
{
"aliasColors": {},
"bars": false,


@@ -44,3 +44,31 @@ groups:
###
### End of 'Prometheus Console Only' rules block
###
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###
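
A minimal sketch of consuming one of these recorded series outside Grafana, assuming a Prometheus server on localhost:9090 with the rules above loaded:

```python
import json
import urllib.parse
import urllib.request

PROM = "http://localhost:9090"  # assumption: local Prometheus with these rules loaded
query = "rate(synapse_storage_events_persisted_by_source_type[5m])"

url = f"{PROM}/api/v1/query?" + urllib.parse.urlencode({"query": query})
with urllib.request.urlopen(url) as resp:
    result = json.load(resp)

# Each sample carries the synthetic `type` label (remote/local/bridges) that
# the recording rules attach in place of the high-cardinality origin labels.
for sample in result["data"]["result"]:
    print(sample["metric"].get("type"), sample["value"][1])
```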

debian/changelog (vendored, 24 lines changed)

@@ -1,27 +1,3 @@
matrix-synapse-py3 (1.143.0~rc2) stable; urgency=medium
* New Synapse release 1.143.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Nov 2025 17:36:08 -0700
matrix-synapse-py3 (1.143.0~rc1) stable; urgency=medium
* New Synapse release 1.143.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Nov 2025 13:08:39 -0700
matrix-synapse-py3 (1.142.1) stable; urgency=medium
* New Synapse release 1.142.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Nov 2025 12:25:23 -0700
matrix-synapse-py3 (1.142.0) stable; urgency=medium
* New Synapse release 1.142.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Nov 2025 09:45:51 +0000
matrix-synapse-py3 (1.142.0~rc4) stable; urgency=medium
* New Synapse release 1.142.0rc4.


@@ -11,7 +11,7 @@ ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=trixie
FROM docker.io/library/postgres:14-${DEBIAN_VERSION} AS postgres_base
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
@@ -26,7 +26,7 @@ RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/14/bin"
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.


@@ -1,4 +1,4 @@
# Streams
## Streams
Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
https://github.com/element-hq/synapse/blob/develop/synapse/storage/util/id_generators.py
@@ -19,7 +19,7 @@ To that end, let's describe streams formally, paraphrasing from the docstring of
https://github.com/element-hq/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).
## Definition
### Definition
A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.
@@ -47,7 +47,7 @@ But unhappy cases (e.g. transaction rollback due to an error) also count as comp
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.
## Current stream ID
### Current stream ID
For any given stream reader (including writers themselves), we may define a per-writer current stream ID:
@@ -93,7 +93,7 @@ Consider a single-writer stream which is initially at ID 1.
| Complete 6 | 6 | |
## Multi-writer streams
### Multi-writer streams
There are two ways to view a multi-writer stream.
@@ -115,7 +115,7 @@ The facts this stream holds are instructions to "you should now invalidate these
We only ever treat this as a multiple single-writer streams as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
## Writing to streams
### Writing to streams
Writers need to track:
- track their current position (i.e. its own per-writer stream ID).
@@ -133,7 +133,7 @@ To complete a fact, first remove it from your map of facts currently awaiting co
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
## Subscribing to streams
### Subscribing to streams
Readers need to track the current position of every writer.
@@ -146,44 +146,10 @@ The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
## Summary
# Summary
In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.
---
## Cheatsheet for creating a new stream
These rough notes and links may help you to create a new stream and add all the
necessary registration and event handling.
**Create your stream:**
- [create a stream class and stream row class](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/streams/_base.py#L728)
- will need an [ID generator](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L75)
- may need [writer configuration](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/config/workers.py#L177), if there isn't already an obvious source of configuration for which workers should be designated as writers to your new stream.
- if adding new writer configuration, add Docker-worker configuration, which lets us configure the writer worker in Complement tests: [[1]](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/docker/configure_workers_and_start.py#L331), [[2]](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/docker/configure_workers_and_start.py#L440)
- most of the time, you will likely introduce a new datastore class for the concept represented by the new stream, unless there is already an obvious datastore that covers it.
- consider whether it may make sense to introduce a handler
**Register your stream in:**
- [`STREAMS_MAP`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/streams/__init__.py#L71)
**Advance your stream in:**
- [`process_replication_position` of your appropriate datastore](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L111)
- don't forget the super call
**If you're going to do any caching that needs invalidation from new rows:**
- add invalidations to [`process_replication_rows` of your appropriate datastore](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L91)
- don't forget the super call
- add local-only [invalidations to your writer transactions](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/storage/databases/main/thread_subscriptions.py#L201)
**For streams to be used in sync:**
- add a new field to [`StreamToken`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/types/__init__.py#L1003)
- add a new [`StreamKeyType`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/types/__init__.py#L999)
- add appropriate wake-up rules
- in [`on_rdata`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/replication/tcp/client.py#L260)
- locally on the same worker when completing a write, [e.g. in your handler](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/handlers/thread_subscriptions.py#L139)
- add the stream in [`bound_future_token`](https://github.com/element-hq/synapse/blob/4367fb2d078c52959aeca0fe6874539c53e8360d/synapse/streams/events.py#L127)
---
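
A minimal single-writer sketch of the scheme described above (illustrative only; Synapse's real machinery lives in `synapse/storage/util/id_generators.py`): IDs are allocated monotonically, facts may complete out of order, and the current position only advances past contiguously completed IDs.

```python
class SingleWriterStream:
    def __init__(self) -> None:
        self._next_id = 1
        self._in_flight: set[int] = set()  # allocated but not yet completed
        self._current = 0  # every fact with ID <= _current is complete

    def allocate(self) -> int:
        """Start writing a fact: hand out the next stream ID."""
        stream_id = self._next_id
        self._next_id += 1
        self._in_flight.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        """Finish (or abort) a fact; advance the current position if possible."""
        self._in_flight.remove(stream_id)
        # Advance past every ID that is now contiguously complete.
        while self._current + 1 < self._next_id and (self._current + 1) not in self._in_flight:
            self._current += 1
            # A real writer would emit an RDATA message for this ID here.

    @property
    def current_position(self) -> int:
        return self._current


stream = SingleWriterStream()
a, b = stream.allocate(), stream.allocate()  # IDs 1 and 2
stream.complete(b)                           # 2 is done, but 1 is still pending...
assert stream.current_position == 0          # ...so the position cannot advance
stream.complete(a)                           # 1 is done: advance past both
assert stream.current_position == 2
```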


@@ -86,45 +86,6 @@ server {
}
```
### Nginx Proxy Manager or NPMPlus
```nginx
Add New Proxy-Host
- Tab Details
- Domain Names: matrix.example.com
- Scheme: http
- Forward Hostname / IP: localhost # IP address or hostname where Synapse is hosted. Bare-metal or Container.
- Forward Port: 8008
- Tab Custom locations
- Add Location
- Define Location: /_matrix
- Scheme: http
- Forward Hostname / IP: localhost # IP address or hostname where Synapse is hosted. Bare-metal or Container.
- Forward Port: 8008
- Click on the gear icon to display a custom configuration field. Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
- Enter this in the Custom Field: client_max_body_size 50M;
- Tab SSL/TLS
- Choose your SSL/TLS certificate and preferred settings.
- Tab Advanced
- Enter this in the Custom Field. This means that port 8448 no longer needs to be opened in your firewall; federation traffic now uses port 443.
location /.well-known/matrix/server {
return 200 '{"m.server": "matrix.example.com:443"}';
add_header Content-Type application/json;
}
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://matrix.example.com"}}';
add_header Content-Type application/json;
add_header "Access-Control-Allow-Origin" *;
}
```
### Caddy v2
```


@@ -117,14 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.143.0
## Dropping support for PostgreSQL 13
In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.
# Upgrading to v1.142.0
## Python 3.10+ is now required

poetry.lock (generated, 721 lines changed)

@@ -349,14 +349,14 @@ files = [
[[package]]
name = "click"
version = "8.3.1"
version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.10"
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"},
{file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"},
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
]
[package.dependencies]
@@ -389,62 +389,52 @@ files = [
[[package]]
name = "cryptography"
version = "45.0.7"
version = "43.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.7"
python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
{file = "cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3"},
{file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3"},
{file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6"},
{file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd"},
{file = "cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8"},
{file = "cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443"},
{file = "cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27"},
{file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17"},
{file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b"},
{file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c"},
{file = "cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5"},
{file = "cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:de58755d723e86175756f463f2f0bddd45cc36fbd62601228a3f8761c9f58252"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a20e442e917889d1a6b3c570c9e3fa2fdc398c20868abcea268ea33c024c4083"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:258e0dff86d1d891169b5af222d362468a9570e2532923088658aa866eb11130"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d97cf502abe2ab9eff8bd5e4aca274da8d06dd3ef08b759a8d6143f4ad65d4b4"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:c987dad82e8c65ebc985f5dae5e74a3beda9d0a2a4daf8a1115f3772b59e5141"},
{file = "cryptography-45.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c13b1e3afd29a5b3b2656257f14669ca8fa8d7956d509926f0b130b600b50ab7"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a862753b36620af6fc54209264f92c716367f2f0ff4624952276a6bbd18cbde"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:06ce84dc14df0bf6ea84666f958e6080cdb6fe1231be2a51f3fc1267d9f3fb34"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d0c5c6bac22b177bf8da7435d9d27a6834ee130309749d162b26c3105c0795a9"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:2f641b64acc00811da98df63df7d59fd4706c0df449da71cb7ac39a0732b40ae"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:f5414a788ecc6ee6bc58560e85ca624258a55ca434884445440a810796ea0e0b"},
{file = "cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63"},
{file = "cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971"},
{file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"},
{file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"},
{file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"},
{file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"},
{file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"},
{file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"},
{file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"},
]
[package.dependencies]
cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""}
cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""}
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""]
pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
sdist = ["build (>=1.0.0)"]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"]
nox = ["nox"]
pep8test = ["check-sdist", "click", "mypy", "ruff"]
sdist = ["build"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==45.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -795,14 +785,14 @@ files = [
[[package]]
name = "immutabledict"
version = "4.2.2"
version = "4.2.1"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = "<4.0,>=3.8"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "immutabledict-4.2.2-py3-none-any.whl", hash = "sha256:97c31d098a2c850e93a958badeef765e4736ed7942ec73e439facd764a3a7217"},
{file = "immutabledict-4.2.2.tar.gz", hash = "sha256:cb6ed3090df593148f94cb407d218ca526fd2639694afdb553dc4f50ce6feeca"},
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
]
[[package]]
@@ -812,7 +802,7 @@ description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
markers = "python_version < \"3.12\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\""
markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\""
files = [
{file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
{file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
@@ -945,14 +935,14 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-
[[package]]
name = "jsonschema-specifications"
version = "2023.7.1"
version = "2023.6.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
{file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
{file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"},
{file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"},
]
[package.dependencies]
@@ -1405,18 +1395,18 @@ files = [
[[package]]
name = "multipart"
version = "1.3.0"
version = "1.2.1"
description = "Parser for multipart/form-data"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "multipart-1.3.0-py3-none-any.whl", hash = "sha256:439bf4b00fd7cb2dbff08ae13f49f4f49798931ecd8d496372c63537fa19f304"},
{file = "multipart-1.3.0.tar.gz", hash = "sha256:a46bd6b0eb4c1ba865beb88ddd886012a3da709b6e7b86084fc37e99087e5cf1"},
{file = "multipart-1.2.1-py3-none-any.whl", hash = "sha256:c03dc203bc2e67f6b46a599467ae0d87cf71d7530504b2c1ff4a9ea21d8b8c8c"},
{file = "multipart-1.2.1.tar.gz", hash = "sha256:829b909b67bc1ad1c6d4488fcdc6391c2847842b08323addf5200db88dbe9480"},
]
[package.extras]
dev = ["build", "pytest", "pytest-cov", "tox", "tox-uv", "twine"]
dev = ["build", "pytest", "pytest-cov", "twine"]
docs = ["sphinx (>=8,<9)", "sphinx-autobuild"]
[[package]]
@@ -1583,123 +1573,139 @@ files = [
[[package]]
name = "phonenumbers"
version = "9.0.18"
version = "9.0.15"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "phonenumbers-9.0.18-py2.py3-none-any.whl", hash = "sha256:d3354454ac31c97f8a08121df97a7145b8dca641f734c6f1518a41c2f60c5764"},
{file = "phonenumbers-9.0.18.tar.gz", hash = "sha256:5537c61ba95b11b992c95e804da6e49193cc06b1224f632ade64631518a48ed1"},
{file = "phonenumbers-9.0.15-py2.py3-none-any.whl", hash = "sha256:269b73bc05258e8fd57582770b9559307099ea677c8f1dc5272476f661344776"},
{file = "phonenumbers-9.0.15.tar.gz", hash = "sha256:345ff7f23768332d866f37732f815cdf1d33c7f0961246562a5c5b78c12c3ff3"},
]
[[package]]
name = "pillow"
version = "12.0.0"
description = "Python Imaging Library (fork)"
version = "11.3.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.10"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pillow-12.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b"},
{file = "pillow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1"},
{file = "pillow-12.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363"},
{file = "pillow-12.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca"},
{file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e"},
{file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782"},
{file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10"},
{file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa"},
{file = "pillow-12.0.0-cp310-cp310-win32.whl", hash = "sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275"},
{file = "pillow-12.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d"},
{file = "pillow-12.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7"},
{file = "pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc"},
{file = "pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257"},
{file = "pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642"},
{file = "pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3"},
{file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c"},
{file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227"},
{file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b"},
{file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e"},
{file = "pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739"},
{file = "pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e"},
{file = "pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d"},
{file = "pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371"},
{file = "pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082"},
{file = "pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f"},
{file = "pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d"},
{file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953"},
{file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8"},
{file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79"},
{file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba"},
{file = "pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0"},
{file = "pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a"},
{file = "pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399"},
{file = "pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5"},
{file = "pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b"},
{file = "pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3"},
{file = "pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07"},
{file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e"},
{file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344"},
{file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27"},
{file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79"},
{file = "pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098"},
{file = "pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905"},
{file = "pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a"},
{file = "pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3"},
{file = "pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe"},
{file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee"},
{file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef"},
{file = "pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9"},
{file = "pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b"},
{file = "pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a"},
{file = "pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b"},
{file = "pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad"},
{file = "pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01"},
{file = "pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c"},
{file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e"},
{file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e"},
{file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9"},
{file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab"},
{file = "pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b"},
{file = "pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b"},
{file = "pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0"},
{file = "pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6"},
{file = "pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925"},
{file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8"},
{file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4"},
{file = "pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52"},
{file = "pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a"},
{file = "pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5"},
{file = "pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353"},
{file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"},
{file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"},
{file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"},
{file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"},
{file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"},
{file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"},
{file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"},
{file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e"},
{file = "pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6"},
{file = "pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f"},
{file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"},
{file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"},
{file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"},
{file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"},
{file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"},
{file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"},
{file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"},
{file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"},
{file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94"},
{file = "pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0"},
{file = "pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac"},
{file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"},
{file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"},
{file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"},
{file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"},
{file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"},
{file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"},
{file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"},
{file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"},
{file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d"},
{file = "pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149"},
{file = "pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d"},
{file = "pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542"},
{file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd"},
{file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8"},
{file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"},
{file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"},
{file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"},
{file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"},
{file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"},
{file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"},
{file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"},
{file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"},
{file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b"},
{file = "pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3"},
{file = "pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51"},
{file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"},
{file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"},
{file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"},
{file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"},
{file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"},
{file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"},
{file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"},
{file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"},
{file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c"},
{file = "pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788"},
{file = "pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31"},
{file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"},
{file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"},
{file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"},
{file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"},
{file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"},
{file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"},
{file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"},
{file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"},
{file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a"},
{file = "pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214"},
{file = "pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635"},
{file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"},
{file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"},
{file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"},
{file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"},
{file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"},
{file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"},
{file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"},
{file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"},
{file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b"},
{file = "pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12"},
{file = "pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db"},
{file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"},
{file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"},
{file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"},
{file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"},
{file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"},
{file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"},
{file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"},
{file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"},
{file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d"},
{file = "pillow-11.3.0-cp39-cp39-win32.whl", hash = "sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71"},
{file = "pillow-11.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada"},
{file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"},
{file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"},
{file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"},
{file = "pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"},
]

[package.extras]
docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
fpx = ["olefile"]
mic = ["olefile"]
test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"]
test-arrow = ["pyarrow"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"]
typing = ["typing-extensions ; python_version < \"3.10\""]
xmp = ["defusedxml"]

[[package]]
@@ -1807,19 +1813,19 @@ files = [
[[package]]
name = "pydantic"
version = "2.12.4"
version = "2.12.3"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"},
{file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"},
{file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"},
{file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"},
]

[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.41.5"
pydantic-core = "2.41.4"
typing-extensions = ">=4.14.1"
typing-inspection = ">=0.4.2"
@@ -1829,133 +1835,129 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows

[[package]]
name = "pydantic-core"
version = "2.41.5"
version = "2.41.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"},
{file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"},
{file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"},
{file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"},
{file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"},
{file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"},
{file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"},
{file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"},
{file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"},
{file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"},
{file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"},
{file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"},
{file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"},
{file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"},
{file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"},
{file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"},
{file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"},
{file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"},
{file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"},
{file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"},
{file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"},
{file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"},
{file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"},
{file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"},
{file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"},
{file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"},
{file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"},
{file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"},
{file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"},
{file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"},
{file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"},
{file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"},
{file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"},
{file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"},
{file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"},
{file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"},
{file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"},
{file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"},
{file = "pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"},
{file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"},
{file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"},
{file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"},
{file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"},
{file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"},
{file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"},
{file = "pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"},
{file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"},
{file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"},
{file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"},
{file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"},
{file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"},
{file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"},
{file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"},
{file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"},
{file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"},
{file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"},
{file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"},
{file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"},
{file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"},
{file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"},
{file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"},
{file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"},
{file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"},
{file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"},
{file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"},
{file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"},
{file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"},
{file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"},
{file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"},
{file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"},
{file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"},
{file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"},
{file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"},
{file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"},
{file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"},
{file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"},
{file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"},
{file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"},
{file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"},
{file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"},
{file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"},
{file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"},
{file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"},
{file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"},
{file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"},
{file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"},
{file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"},
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"},
{file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"},
{file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"},
{file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"},
{file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"},
{file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"},
{file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"},
{file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"},
{file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"},
{file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"},
{file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"},
{file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"},
{file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"},
{file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"},
{file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"},
{file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"},
{file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"},
]
[package.dependencies]
@@ -2489,31 +2491,31 @@ files = [
[[package]]
name = "ruff"
version = "0.14.5"
version = "0.14.3"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594"},
{file = "ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72"},
{file = "ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2"},
{file = "ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19"},
{file = "ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465"},
{file = "ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367"},
{file = "ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b"},
{file = "ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621"},
{file = "ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4"},
{file = "ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1"},
{file = "ruff-0.14.3-py3-none-linux_armv6l.whl", hash = "sha256:876b21e6c824f519446715c1342b8e60f97f93264012de9d8d10314f8a79c371"},
{file = "ruff-0.14.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b6fd8c79b457bedd2abf2702b9b472147cd860ed7855c73a5247fa55c9117654"},
{file = "ruff-0.14.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:71ff6edca490c308f083156938c0c1a66907151263c4abdcb588602c6e696a14"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:786ee3ce6139772ff9272aaf43296d975c0217ee1b97538a98171bf0d21f87ed"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cd6291d0061811c52b8e392f946889916757610d45d004e41140d81fb6cd5ddc"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a497ec0c3d2c88561b6d90f9c29f5ae68221ac00d471f306fa21fa4264ce5fcd"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e231e1be58fc568950a04fbe6887c8e4b85310e7889727e2b81db205c45059eb"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:469e35872a09c0e45fecf48dd960bfbce056b5db2d5e6b50eca329b4f853ae20"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d6bc90307c469cb9d28b7cfad90aaa600b10d67c6e22026869f585e1e8a2db0"},
{file = "ruff-0.14.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2f8a0bbcffcfd895df39c9a4ecd59bb80dca03dc43f7fb63e647ed176b741e"},
{file = "ruff-0.14.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:678fdd7c7d2d94851597c23ee6336d25f9930b460b55f8598e011b57c74fd8c5"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1ec1ac071e7e37e0221d2f2dbaf90897a988c531a8592a6a5959f0603a1ecf5e"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afcdc4b5335ef440d19e7df9e8ae2ad9f749352190e96d481dc501b753f0733e"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:7bfc42f81862749a7136267a343990f865e71fe2f99cf8d2958f684d23ce3dfa"},
{file = "ruff-0.14.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a65e448cfd7e9c59fae8cf37f9221585d3354febaad9a07f29158af1528e165f"},
{file = "ruff-0.14.3-py3-none-win32.whl", hash = "sha256:f3d91857d023ba93e14ed2d462ab62c3428f9bbf2b4fbac50a03ca66d31991f7"},
{file = "ruff-0.14.3-py3-none-win_amd64.whl", hash = "sha256:d7b7006ac0756306db212fd37116cce2bd307e1e109375e1c6c106002df0ae5f"},
{file = "ruff-0.14.3-py3-none-win_arm64.whl", hash = "sha256:26eb477ede6d399d898791d01961e16b86f02bc2486d0d1a7a9bb2379d055dc1"},
{file = "ruff-0.14.3.tar.gz", hash = "sha256:4ff876d2ab2b161b6de0aa1f5bd714e8e9b4033dc122ee006925fbacc4f62153"},
]
[[package]]
@@ -2551,15 +2553,15 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.44.0"
version = "2.34.1"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"all\" or extra == \"sentry\""
files = [
{file = "sentry_sdk-2.44.0-py2.py3-none-any.whl", hash = "sha256:9e36a0372b881e8f92fdbff4564764ce6cec4b7f25424d0a3a8d609c9e4651a7"},
{file = "sentry_sdk-2.44.0.tar.gz", hash = "sha256:5b1fe54dfafa332e900b07dd8f4dfe35753b64e78e7d9b1655a28fd3065e2493"},
{file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"},
{file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"},
]
[package.dependencies]
@@ -2581,25 +2583,20 @@ django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
google-genai = ["google-genai (>=1.29.0)"]
grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
http2 = ["httpcore[http2] (==1.*)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
huggingface-hub = ["huggingface_hub (>=0.22)"]
langchain = ["langchain (>=0.0.210)"]
langgraph = ["langgraph (>=0.6.6)"]
launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"]
litellm = ["litellm (>=1.77.5)"]
litestar = ["litestar (>=2.0.0)"]
loguru = ["loguru (>=0.5)"]
mcp = ["mcp (>=1.15.0)"]
openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
openfeature = ["openfeature-sdk (>=0.7.1)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
opentelemetry-experimental = ["opentelemetry-distro"]
pure-eval = ["asttokens", "executing", "pure_eval"]
pydantic-ai = ["pydantic-ai (>=1.0.0)"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
@@ -2780,54 +2777,44 @@ twisted = ["twisted"]
[[package]]
name = "tomli"
version = "2.3.0"
version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"},
{file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"},
{file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"},
{file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"},
{file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"},
{file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"},
{file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"},
{file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"},
{file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"},
{file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"},
{file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"},
{file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"},
{file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"},
{file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"},
{file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"},
{file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"},
{file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"},
{file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"},
{file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"},
{file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"},
{file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"},
{file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"},
{file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"},
{file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"},
{file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"},
{file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"},
{file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"},
{file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"},
{file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"},
{file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"},
{file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"},
{file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"},
{file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"},
{file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"},
{file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"},
{file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"},
{file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"},
{file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"},
{file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"},
{file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"},
{file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"},
{file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"},
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
{file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
{file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
{file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
{file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
{file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
{file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
{file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
{file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
{file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
{file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
{file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
{file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]
markers = {main = "python_version < \"3.14\""}
@@ -3041,14 +3028,14 @@ referencing = "*"
[[package]]
name = "types-netaddr"
version = "1.3.0.20251108"
version = "1.3.0.20240530"
description = "Typing stubs for netaddr"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "types_netaddr-1.3.0.20251108-py3-none-any.whl", hash = "sha256:1699b3aae860b8754e8ba7fb426905287065c9dbce05d019c25b630ae2ba66c5"},
{file = "types_netaddr-1.3.0.20251108.tar.gz", hash = "sha256:2895d8a48eb71ba0ebecf74c6cebaddfa0f97199835ebb406b5aae1725e06ac1"},
{file = "types-netaddr-1.3.0.20240530.tar.gz", hash = "sha256:742c2ec1f202b666f544223e2616b34f1f13df80c91e5aeaaa93a72e4d0774ea"},
{file = "types_netaddr-1.3.0.20240530-py3-none-any.whl", hash = "sha256:354998d018e326da4f1d9b005fc91137b7c2c473aaf03c4ef64bf83c6861b440"},
]
[[package]]
@@ -3239,7 +3226,7 @@ description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version < \"3.12\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\""
markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\""
files = [
{file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
{file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
@@ -3336,8 +3323,8 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
all = ["authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pympler", "pysaml2", "sentry-sdk", "txredisapi"]
cache-memory = ["pympler"]
all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "sentry-sdk", "txredisapi"]
cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
@@ -3352,5 +3339,5 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
content-hash = "4f8d98723236eaf3d13f440dce95ec6cc3c4dc49ba3a0e45bf9cfbb51aca899c"
python-versions = "^3.10.0"
content-hash = "2a891bc466355554d5c5873e7f8592e4f693de4d0f734ddb55f8a55bb4e529df"


@@ -1,183 +1,3 @@
[project]
name = "matrix-synapse"
version = "1.143.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
{ name = "Matrix.org Team and Contributors", email = "packages@matrix.org" }
]
requires-python = ">=3.10.0,<4.0.0"
license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Chat",
]
# Mandatory Dependencies
dependencies = [
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
"jsonschema>=3.0.0",
# 0.25.0 is the first version to support Python 3.14.
# We can remove this once https://github.com/python-jsonschema/jsonschema/issues/1426 is fixed
# and included in a release.
"rpds-py>=0.25.0",
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
"immutabledict>=2.0",
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
"unpaddedbase64>=2.1.0",
# We require 2.0.0 for immutabledict support.
"canonicaljson>=2.0.0,<3.0.0",
# we use the type definitions added in signedjson 1.1.
"signedjson>=1.1.0,<2.0.0",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service-identity>=18.1.0",
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
# Twisted 19.7.0 moves test helpers to a new module and deprecates the old location.
# Twisted 21.2.0 introduces contextvar support.
# We could likely bump this to 22.1 without making distro packagers'
# lives hard (as of 2025-07, distro support is Ubuntu LTS: 22.1, Debian stable: 22.4,
# RHEL 9: 22.10)
"Twisted[tls]>=21.2.0",
"treq>=21.5.0",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyOpenSSL>=16.0.0",
"PyYAML>=5.3",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
"bcrypt>=3.1.7",
# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
# Packagers that already took care of libwebp can lower that down to 5.4.0.
"Pillow>=10.0.1",
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
# 2.0.5 updates collections.abc imports to avoid Python 3.10 incompatibility.
"sortedcontainers>=2.0.5",
"pymacaroons>=0.13.0",
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
# `prometheus_client.metrics` was added in 0.5.0, so we require that too.
# We chose 0.6.0 as that is the current version in Debian Buster (oldstable).
"prometheus-client>=0.6.0",
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
"attrs>=19.2.0,!=21.1.0",
"netaddr>=0.7.18",
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
"Jinja2>=3.0",
# 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
"bleach>=3.2.0",
# pydantic 2.12 depends on typing-extensions>=4.14.1
"typing-extensions>=4.14.1",
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
"cryptography>=3.4.7",
# ijson 3.1.4 fixes a bug with "." in property names
"ijson>=3.1.4",
"matrix-common>=1.3.0,<2.0.0",
# We need packaging.version.Version(...).major added in 20.0.
"packaging>=20.0",
"pydantic>=2.8;python_version < '3.14'",
"pydantic>=2.12;python_version >= '3.14'",
# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --without dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
"setuptools_rust>=1.3",
# This is used for parsing multipart responses
"python-multipart>=0.0.9",
]
[project.optional-dependencies]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3>=0.1"]
postgres = [
"psycopg2>=2.8;platform_python_implementation != 'PyPy'",
"psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
]
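
The postgres extra above leans on PEP 508 environment markers so that CPython and PyPy installs resolve to different drivers. As a hedged illustration of how such a marker evaluates, here is a minimal sketch using the `packaging` library (a mandatory dependency listed earlier); the marker string is copied from the extra, the rest is illustrative:

from packaging.markers import Marker

# The marker guarding psycopg2 in the postgres extra above.
marker = Marker("platform_python_implementation != 'PyPy'")

# True on CPython (so pip installs psycopg2); False on PyPy,
# where the psycopg2cffi entries apply instead.
print(marker.evaluate())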
saml2 = ["pysaml2>=4.5.0"]
oidc = ["authlib>=0.15.1"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
opentracing = ["jaeger-client>=4.2.0", "opentracing>=2.2.0"]
jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi>=1.4.7", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
# If this is updated, don't forget to update the equivalent lines in
# tool.poetry.group.dev.dependencies.
test = ["parameterized>=0.9.0", "idna>=3.3"]
# The duplication here is awful.
#
# TODO: This can be resolved via PEP 735 dependency groups, which poetry supports
# since 2.2.0. However, switching to that would require updating the command
# developers use to install the `all` group. This would require some coordination.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
# matrix-synapse-ldap3
"matrix-synapse-ldap3>=0.1",
# postgres
"psycopg2>=2.8;platform_python_implementation != 'PyPy'",
"psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
# saml2
"pysaml2>=4.5.0",
# oidc and jwt
"authlib>=0.15.1",
# url-preview
"lxml>=4.6.3",
# sentry
"sentry-sdk>=0.7.2",
# opentracing
"jaeger-client>=4.2.0", "opentracing>=2.2.0",
# redis
"txredisapi>=1.4.7", "hiredis",
# cache-memory
"pympler",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
]
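
Since the comments above stress that the strings in `all` must be package names rather than extra names, here is a quick hedged sketch for telling the two apart at runtime; it assumes a matrix-synapse distribution is installed and uses only the standard library:

from importlib.metadata import metadata, requires

md = metadata("matrix-synapse")
# Extra names the distribution advertises (e.g. "postgres", "cache-memory").
print(md.get_all("Provides-Extra"))

# Package requirements gated behind the "postgres" extra.
print([r for r in (requires("matrix-synapse") or []) if "postgres" in r])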
[project.urls]
repository = "https://github.com/element-hq/synapse"
documentation = "https://element-hq.github.io/synapse/latest"
"Issue Tracker" = "https://github.com/element-hq/synapse/issues"
[project.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"
export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.towncrier]
package = "synapse"
filename = "CHANGES.md"
@@ -291,9 +111,20 @@ manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.142.0rc4"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial"
readme = "README.rst"
repository = "https://github.com/element-hq/synapse"
packages = [
{ include = "synapse" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Chat",
]
include = [
{ path = "AUTHORS.rst", format = "sdist" },
{ path = "book.toml", format = "sdist" },
@@ -323,12 +154,197 @@ exclude = [
script = "build_rust.py"
generate-setup-file = true
[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
synctl = "synapse._scripts.synctl:main"
export_signing_key = "synapse._scripts.export_signing_key:main"
generate_config = "synapse._scripts.generate_config:main"
generate_log_config = "synapse._scripts.generate_log_config:main"
generate_signing_key = "synapse._scripts.generate_signing_key:main"
hash_password = "synapse._scripts.hash_password:main"
register_new_matrix_user = "synapse._scripts.register_new_matrix_user:main"
synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.10.0"
# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# 0.25.0 is the first version to support Python 3.14.
# We can remove this once https://github.com/python-jsonschema/jsonschema/issues/1426 is fixed
# and included in a release.
rpds-py = ">=0.25.0"
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
immutabledict = ">=2.0"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 2.0.0 for immutabledict support.
canonicaljson = "^2.0.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
# logger utilises
# Twisted 19.7.0 moves test helpers to a new module and deprecates the old location.
# Twisted 21.2.0 introduces contextvar support.
# We could likely bump this to 22.1 without making distro packagers'
# lives hard (as of 2025-07, distro support is Ubuntu LTS: 22.1, Debian stable: 22.4,
# RHEL 9: 22.10)
Twisted = {extras = ["tls"], version = ">=21.2.0"}
treq = ">=21.5.0"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=5.3"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
# Packagers that already took care of libwebp can lower that down to 5.4.0.
Pillow = ">=10.0.1"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
# 2.0.5 updates collections.abc imports to avoid Python 3.10 incompatibility.
sortedcontainers = ">=2.0.5"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
# `prometheus_client.metrics` was added in 0.5.0, so we require that too.
# We chose 0.6.0 as that is the current version in Debian Buster (oldstable).
prometheus-client = ">=0.6.0"
# we use `order`, which arrived in attrs 19.2.0.
# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
attrs = ">=19.2.0,!=21.1.0"
netaddr = ">=0.7.18"
# Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
# end up with a broken installation, with recent MarkupSafe but old Jinja, we
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
# 3.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
bleach = ">=3.2.0"
# pydantic 2.12 depends on typing-extensions>=4.14.1
typing-extensions = ">=4.14.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.version.Version(...).major added in 20.0.
packaging = ">=20.0"
pydantic = [
{ version = "~=2.8", python = "<3.14" },
{ version = "~=2.12", python = ">=3.14" },
]
# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --without dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
setuptools_rust = ">=1.3"
# This is used for parsing multipart responses
python-multipart = ">=0.0.9"
# Optional Dependencies
# ---------------------
matrix-synapse-ldap3 = { version = ">=0.1", optional = true }
psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true }
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
# 4.6.3 removes usage of _PyGen_Send which is unavailable in CPython as of Python 3.10.
lxml = { version = ">=4.6.3", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
# 4.2.0 updates collections.abc imports to avoid Python 3.10 incompatibility.
jaeger-client = { version = ">=4.2.0", optional = true }
txredisapi = { version = ">=1.4.7", optional = true }
hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
# twice: once here, and once in the `all` extra.
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
saml2 = ["pysaml2"]
oidc = ["authlib"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
test = ["parameterized", "idna"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
# 1) for new installations, I want instructions in existing documentation and tutorials
# out there to still work.
# 2) I don't want to hard-code a list of extras into CI if I can help it. The ideal
# solution here would be something like https://github.com/python-poetry/poetry/issues/3413
# Poetry 1.2's dependency groups might make this easier. But I'm not trying that out
# until there's a stable release of 1.2.
#
# NB: the strings in this list must be *package* names, not extra names.
# Some of our extra names _are_ package names, which can lead to great confusion.
all = [
# matrix-synapse-ldap3
"matrix-synapse-ldap3",
# postgres
"psycopg2", "psycopg2cffi", "psycopg2cffi-compat",
# saml2
"pysaml2",
# oidc and jwt
"authlib",
# url-preview
"lxml",
# sentry
"sentry-sdk",
# opentracing
"jaeger-client", "opentracing",
# redis
"txredisapi", "hiredis",
# cache-memory
"pympler",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
]
[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.14.5"
ruff = "0.14.3"
# Typechecking
lxml-stubs = ">=0.4.0"
@@ -348,11 +364,10 @@ types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# If this is updated, don't forget to update the equivalent lines in
# project.optional-dependencies.test.
parameterized = ">=0.9.0"
idna = ">=3.3"
# parameterized<0.7.4 can create classes with names that would normally be invalid
# identifiers. trial really does not like this when running with multiple workers.
parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
click = ">=8.1.3"
@@ -376,27 +391,19 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=2.0.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
requires = ["poetry-core>=1.1.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
#
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the
# list of supported build targets.
#
# Also see `.github/workflows/release-artifacts.yml` for the list of
# architectures we build for (based on the runner OS types we use), as well as
# the platforms we exclude from testing in CI.
#
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - free-threaded cpython builds: these are not currently supported.
# - i686: We don't support 32-bit platforms.
skip = "cp3??t-* *i686*"
# Enable non-default builds. See the list of available options:
# https://cibuildwheel.pypa.io/en/stable/options#enable
#
# - CPython 3.8: EOLed
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
skip = "cp38* *-musllinux_i686"
# Enable non-default builds.
# "pypy" used to be included by default up until cibuildwheel 3.
enable = "pypy"
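
For intuition about the two `skip` values in this hunk: cibuildwheel treats `skip` as a space-separated list of fnmatch-style globs matched against build identifiers. A hedged sketch with illustrative identifiers (real identifiers follow the same cpXY-platform shape):

from fnmatch import fnmatch

SKIP = ["cp3??t-*", "*i686*"]  # globs from the newer skip value above

for ident in [
    "cp310-manylinux_x86_64",   # ordinary CPython build: kept
    "cp313t-manylinux_x86_64",  # free-threaded build: skipped
    "cp311-manylinux_i686",     # 32-bit build: skipped
]:
    skipped = any(fnmatch(ident, pattern) for pattern in SKIP)
    print(ident, "-> skipped" if skipped else "-> built")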


@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.142/synapse-config.schema.json
type: object
properties:
modules:

View File

@@ -72,154 +72,153 @@ For help on arguments to 'go test', run 'go help testflag'.
EOF
}
main() {
# parse our arguments
skip_docker_build=""
skip_complement_run=""
while [ $# -ge 1 ]; do
# parse our arguments
skip_docker_build=""
skip_complement_run=""
while [ $# -ge 1 ]; do
arg=$1
case "$arg" in
"-h")
usage
return 1
;;
"-f"|"--fast")
skip_docker_build=1
;;
"--build-only")
skip_complement_run=1
;;
"-e"|"--editable")
use_editable_synapse=1
;;
"--rebuild-editable")
rebuild_editable_synapse=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
"-h")
usage
exit 1
;;
"-f"|"--fast")
skip_docker_build=1
;;
"--build-only")
skip_complement_run=1
;;
"-e"|"--editable")
use_editable_synapse=1
;;
"--rebuild-editable")
rebuild_editable_synapse=1
;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
esac
shift
done
done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
export CONTAINER_RUNTIME=podman
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
export BUILDAH_FORMAT=docker
export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
export CONTAINER_RUNTIME=docker
fi
# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
export CONTAINER_RUNTIME=podman
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
export BUILDAH_FORMAT=docker
export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
export CONTAINER_RUNTIME=docker
fi
# Change to the repository root
cd "$(dirname $0)/.."
# Change to the repository root
cd "$(dirname $0)/.."
# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
COMPLEMENT_REF=${COMPLEMENT_REF:-main}
echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
tar -xzf ${COMPLEMENT_REF}.tar.gz
COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
# Check for a user-specified Complement checkout
if [[ -z "$COMPLEMENT_DIR" ]]; then
COMPLEMENT_REF=${COMPLEMENT_REF:-main}
echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz
tar -xzf ${COMPLEMENT_REF}.tar.gz
COMPLEMENT_DIR=complement-${COMPLEMENT_REF}
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
if [ -n "$use_editable_synapse" ]; then
if [ -n "$use_editable_synapse" ]; then
if [[ -e synapse/synapse_rust.abi3.so ]]; then
# In an editable install, back up the host's compiled Rust module to prevent
# inconvenience; the container will overwrite the module with its own copy.
mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
# And restore it on exit:
synapse_pkg=`realpath synapse`
trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
# In an editable install, back up the host's compiled Rust module to prevent
# inconvenience; the container will overwrite the module with its own copy.
mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
# And restore it on exit:
synapse_pkg=`realpath synapse`
trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
fi
editable_mount="$(realpath .):/editable-src:z"
if [ -n "$rebuild_editable_synapse" ]; then
unset skip_docker_build
elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# First set up the module in the right place for an editable installation.
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
unset skip_docker_build
fi
fi
fi
elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
if [ -z "$skip_docker_build" ]; then
# First set up the module in the right place for an editable installation.
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
unset skip_docker_build
fi
fi
fi
if [ -z "$skip_docker_build" ]; then
if [ -n "$use_editable_synapse" ]; then
# Build a special image designed for use in development with editable
# installs.
$CONTAINER_RUNTIME build -t synapse-editable \
-f "docker/editable.Dockerfile" .
# Build a special image designed for use in development with editable
# installs.
$CONTAINER_RUNTIME build -t synapse-editable \
-f "docker/editable.Dockerfile" .
$CONTAINER_RUNTIME build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
$CONTAINER_RUNTIME build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
# Prepare the Rust module
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
# Prepare the Rust module
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
fi
fi
fi
if [ -n "$skip_complement_run" ]; then
echo "Skipping Complement run as requested."
return 0
fi
if [ -n "$skip_complement_run" ]; then
echo "Skipping Complement run as requested."
exit
fi
export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
export COMPLEMENT_HOST_MOUNTS="$editable_mount"
fi
fi
extra_test_args=()
extra_test_args=()
test_packages=(
test_packages=(
./tests/csapi
./tests
./tests/msc3874
@@ -232,80 +231,71 @@ main() {
./tests/msc4140
./tests/msc4155
./tests/msc4306
)
)
# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
export COMPLEMENT_ENABLE_DIRTY_RUNS=1
# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
export COMPLEMENT_ENABLE_DIRTY_RUNS=1
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
# It takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
# It takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
# Pass through the workers defined. If none, it will be an empty string
export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
# Pass through the workers defined. If none, it will be an empty string
export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
# Workers can only use Postgres as a database.
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
# And provide some more configuration to complement.
# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
else
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
if [[ -n "$POSTGRES" ]]; then
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
# And provide some more configuration to complement.
# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120
else
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
if [[ -n "$POSTGRES" ]]; then
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
# Enable the Twisted asyncio reactor
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full-on Unix socket mode for Synapse, Redis and PostgreSQL
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
# Allow logging sensitive things (currently SQL queries & parameters).
# (This won't have any effect if we're not logging at DEBUG level overall.)
# Since this is just a test suite, this is fine and won't reveal anyone's
# personal information.
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
}
main "$@"
# For any non-zero exit code (indicating some sort of error happened), we want to exit
# with that code.
exit_code=$?
if [ $exit_code -ne 0 ]; then
exit $exit_code
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
# Enable the Twisted asyncio reactor
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full-on Unix socket mode for Synapse, Redis and PostgreSQL
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
# Allow logging sensitive things (currently SQL queries & parameters).
# (This won't have any effect if we're not logging at DEBUG level overall.)
# Since this is just a test suite, this is fine and won't reveal anyone's
# personal information.
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"

View File

@@ -26,7 +26,7 @@ import hashlib
import hmac
import logging
import sys
from typing import Any, Callable, Iterable, TextIO
from typing import Any, Callable
import requests
import yaml
@@ -244,7 +244,6 @@ def main() -> None:
group.add_argument(
"-c",
"--config",
action="append",
type=argparse.FileType("r"),
help="Path to server config file. Used to read in shared secret.",
)
@@ -265,7 +264,7 @@ def main() -> None:
config: dict[str, Any] | None = None
if "config" in args and args.config:
config = _read_config_files(args.config)
config = yaml.safe_load(args.config)
if args.shared_secret:
secret = args.shared_secret
@@ -327,33 +326,6 @@ def main() -> None:
)
# Adapted from synapse.config._base.
def _read_config_files(config_files: Iterable[TextIO]) -> dict[str, Any]:
"""Read the config files and shallowly merge them into a dict.
Successive configurations are shallowly merged into ones provided earlier,
i.e., entirely replacing top-level sections of the configuration.
Args:
config_files: A list of the config files to read
Returns:
The configuration dictionary.
"""
specified_config = {}
for config_file in config_files:
yaml_config = yaml.safe_load(config_file)
if not isinstance(yaml_config, dict):
err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
print(err % (config_file,))
continue
specified_config.update(yaml_config)
return specified_config
def _read_file(file_path: Any, config_path: str) -> str:
"""Check the given file exists, and read it into a string

View File

@@ -58,7 +58,6 @@ from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.delayed_events import DelayedEventsStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
@@ -108,7 +107,6 @@ logger = logging.getLogger("synapse_port_db")
BOOLEAN_COLUMNS = {
"access_tokens": ["used"],
"account_validity": ["email_sent"],
"delayed_events": ["is_processed"],
"device_lists_changes_in_room": ["converted_to_destinations"],
"device_lists_outbound_pokes": ["sent"],
"devices": ["hidden"],
@@ -274,7 +272,6 @@ class Store(
RelationsWorkerStore,
EventFederationWorkerStore,
SlidingSyncStore,
DelayedEventsStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

View File

@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING
from urllib.parse import urlencode
from pydantic import (
AnyHttpUrl,
BaseModel,
ConfigDict,
StrictBool,
@@ -146,13 +147,33 @@ class MasDelegatedAuth(BaseAuth):
@property
def _metadata_url(self) -> str:
return (
f"{str(self._config.endpoint).rstrip('/')}/.well-known/openid-configuration"
return str(
AnyHttpUrl.build(
scheme=self._config.endpoint.scheme,
username=self._config.endpoint.username,
password=self._config.endpoint.password,
host=self._config.endpoint.host or "",
port=self._config.endpoint.port,
path=".well-known/openid-configuration",
query=None,
fragment=None,
)
)
@property
def _introspection_endpoint(self) -> str:
return f"{str(self._config.endpoint).rstrip('/')}/oauth2/introspect"
return str(
AnyHttpUrl.build(
scheme=self._config.endpoint.scheme,
username=self._config.endpoint.username,
password=self._config.endpoint.password,
host=self._config.endpoint.host or "",
port=self._config.endpoint.port,
path="oauth2/introspect",
query=None,
fragment=None,
)
)
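For orientation, a minimal sketch of what this builder yields, assuming pydantic v2 AnyHttpUrl.build semantics (the host is illustrative):
from pydantic import AnyHttpUrl

# Build the URL from parts instead of string concatenation, so credentials
# and ports on the configured endpoint are carried over uniformly.
url = AnyHttpUrl.build(
    scheme="https",
    username=None,
    password=None,
    host="auth.example.com",
    port=None,
    path=".well-known/openid-configuration",
    query=None,
    fragment=None,
)
assert str(url) == "https://auth.example.com/.well-known/openid-configuration"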
async def _load_metadata(self) -> ServerMetadata:
response = await self._http_client.get_json(self._metadata_url)

View File

@@ -272,6 +272,9 @@ class EventContentFields:
M_TOPIC: Final = "m.topic"
M_TEXT: Final = "m.text"
# Event relations
RELATIONS: Final = "m.relates_to"
class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""
@@ -360,3 +363,10 @@ class Direction(enum.Enum):
class ProfileFields:
DISPLAYNAME: Final = "displayname"
AVATAR_URL: Final = "avatar_url"
class MRelatesToFields:
"""Fields found inside m.relates_to content blocks."""
EVENT_ID: Final = "event_id"
REL_TYPE: Final = "rel_type"
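For reference, these constants address a thread reply whose content carries an m.relates_to block shaped like the following (event ID hypothetical):
content = {
    "m.relates_to": {  # EventContentFields.RELATIONS
        "rel_type": "m.thread",  # MRelatesToFields.REL_TYPE, i.e. RelationTypes.THREAD
        "event_id": "$thread_root",  # MRelatesToFields.EVENT_ID: the thread root
    },
    "msgtype": "m.text",
    "body": "a reply inside the thread",
}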

View File

@@ -450,8 +450,7 @@ async def start(
await _base.start(hs, freeze=freeze)
# TODO: Feels like this should be moved somewhere else.
for db in hs.get_datastores().databases:
db.updates.start_doing_background_updates()
hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
def start_reactor(

View File

@@ -65,6 +65,8 @@ from typing import (
Sequence,
)
from twisted.internet.interfaces import IDelayedCall
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
@@ -76,7 +78,7 @@ from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.storage.databases.main import DataStore
from synapse.types import DeviceListUpdates, JsonMapping
from synapse.util.clock import Clock, DelayedCallWrapper
from synapse.util.clock import Clock
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -501,7 +503,7 @@ class _Recoverer:
self.service = service
self.callback = callback
self.backoff_counter = 1
self.scheduled_recovery: DelayedCallWrapper | None = None
self.scheduled_recovery: IDelayedCall | None = None
def recover(self) -> None:
delay = 2**self.backoff_counter

View File

@@ -593,3 +593,6 @@ class ExperimentalConfig(Config):
# MSC4306: Thread Subscriptions
# (and MSC4308: Thread Subscriptions extension to Sliding Sync)
self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False)
# MSC4360: Threads Extension to Sliding Sync
self.msc4360_enabled: bool = experimental.get("msc4360_enabled", False)

View File

@@ -41,7 +41,6 @@ from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import SERVER_NAME_LABEL, sent_transactions_counter
from synapse.types import JsonDict, ReadReceipt
@@ -187,8 +186,7 @@ class PerDestinationQueue:
self._transaction_manager.shutdown()
try:
if self.active_transmission_loop is not None:
with PreserveLoggingContext():
self.active_transmission_loop.cancel()
self.active_transmission_loop.cancel()
except Exception:
pass

View File

@@ -21,7 +21,6 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import ShadowBanError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
@@ -30,9 +29,11 @@ from synapse.replication.http.delayed_events import (
)
from synapse.storage.databases.main.delayed_events import (
DelayedEventDetails,
DelayID,
EventType,
StateKey,
Timestamp,
UserLocalpart,
)
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
@@ -398,63 +399,96 @@ class DelayedEventsHandler:
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at(next_send_ts)
async def cancel(self, request: SynapseRequest, delay_id: str) -> None:
async def cancel(self, requester: Requester, delay_id: str) -> None:
"""
Cancels the scheduled delivery of the matching delayed event.
Args:
requester: The owner of the delayed event to act on.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
None, request.getClientAddress().host
requester,
(requester.user.to_string(), requester.device_id),
)
await make_deferred_yieldable(self._initialized_from_db)
next_send_ts = await self._store.cancel_delayed_event(delay_id)
next_send_ts = await self._store.cancel_delayed_event(
delay_id=delay_id,
user_localpart=requester.user.localpart,
)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at_or_none(next_send_ts)
async def restart(self, request: SynapseRequest, delay_id: str) -> None:
async def restart(self, requester: Requester, delay_id: str) -> None:
"""
Restarts the scheduled delivery of the matching delayed event.
Args:
requester: The owner of the delayed event to act on.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
None, request.getClientAddress().host
requester,
(requester.user.to_string(), requester.device_id),
)
await make_deferred_yieldable(self._initialized_from_db)
next_send_ts = await self._store.restart_delayed_event(
delay_id, self._get_current_ts()
delay_id=delay_id,
user_localpart=requester.user.localpart,
current_ts=self._get_current_ts(),
)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at(next_send_ts)
async def send(self, request: SynapseRequest, delay_id: str) -> None:
async def send(self, requester: Requester, delay_id: str) -> None:
"""
Immediately sends the matching delayed event, instead of waiting for its scheduled delivery.
Args:
requester: The owner of the delayed event to act on.
delay_id: The ID of the delayed event to act on.
Raises:
NotFoundError: if no matching delayed event could be found.
"""
assert self._is_master
await self._delayed_event_mgmt_ratelimiter.ratelimit(
None, request.getClientAddress().host
)
# Use standard request limiter for sending delayed events on-demand,
# as an on-demand send is similar to sending a regular event.
await self._request_ratelimiter.ratelimit(requester)
await make_deferred_yieldable(self._initialized_from_db)
event, next_send_ts = await self._store.process_target_delayed_event(delay_id)
event, next_send_ts = await self._store.process_target_delayed_event(
delay_id=delay_id,
user_localpart=requester.user.localpart,
)
if self._next_send_ts_changed(next_send_ts):
self._schedule_next_at_or_none(next_send_ts)
await self._send_event(event)
await self._send_event(
DelayedEventDetails(
delay_id=DelayID(delay_id),
user_localpart=UserLocalpart(requester.user.localpart),
room_id=event.room_id,
type=event.type,
state_key=event.state_key,
origin_server_ts=event.origin_server_ts,
content=event.content,
device_id=event.device_id,
)
)
async def _send_on_timeout(self) -> None:
self._next_delayed_event_call = None
@@ -577,7 +611,9 @@ class DelayedEventsHandler:
finally:
# TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure
try:
await self._store.delete_processed_delayed_event(event.delay_id)
await self._store.delete_processed_delayed_event(
event.delay_id, event.user_localpart
)
except Exception:
logger.exception("Failed to delete processed delayed event")

View File

@@ -321,7 +321,16 @@ class DirectoryHandler:
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room Alias is not hosted on this homeserver")
return await self.get_association(room_alias)
result = await self.get_association_from_room_alias(room_alias)
if result is not None:
return {"room_id": result.room_id, "servers": result.servers}
else:
raise SynapseError(
404,
"Room alias %r not found" % (room_alias.to_string(),),
Codes.NOT_FOUND,
)
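For clarity, a successful lookup now resolves to a plain dict of this shape (values hypothetical):
{"room_id": "!abcdefgh:example.org", "servers": ["example.org", "matrix.org"]}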
async def _update_canonical_alias(
self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias

View File

@@ -20,6 +20,7 @@
#
import enum
import logging
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Collection,
@@ -30,24 +31,59 @@ from typing import (
import attr
from synapse.api.constants import Direction, EventTypes, RelationTypes
from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.events.utils import SerializeEventConfig
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
from synapse.storage.databases.main.relations import (
ThreadsNextBatch,
ThreadUpdateInfo,
_RelatedEvent,
)
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, UserID
from synapse.types import (
JsonDict,
Requester,
RoomStreamToken,
StreamKeyType,
StreamToken,
UserID,
)
from synapse.util.async_helpers import gather_results
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
from synapse.events.utils import EventClientSerializer
from synapse.handlers.sliding_sync.room_lists import RoomsForUserType
from synapse.server import HomeServer
from synapse.storage.databases.main import DataStore
logger = logging.getLogger(__name__)
# Type aliases for thread update processing
ThreadUpdatesMap = dict[str, list[ThreadUpdateInfo]]
ThreadRootsMap = dict[str, EventBase]
AggregationsMap = dict[str, "BundledAggregations"]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThreadUpdate:
"""
Data for a single thread update.
Attributes:
thread_root: The thread root event, or None if not requested/not visible
prev_batch: Per-thread pagination token for fetching older events in this thread
bundled_aggregations: Bundled aggregations for the thread root event
"""
thread_root: EventBase | None
prev_batch: StreamToken | None
bundled_aggregations: "BundledAggregations | None" = None
class ThreadsListInclude(str, enum.Enum):
"""Valid values for the 'include' flag of /threads."""
@@ -105,8 +141,6 @@ class RelationsHandler:
) -> JsonDict:
"""Get related events of a event, ordered by topological ordering.
TODO Accept a PaginationConfig instead of individual pagination parameters.
Args:
requester: The user requesting the relations.
event_id: Fetch events that relate to this event ID.
@@ -546,6 +580,367 @@ class RelationsHandler:
return results
async def _filter_thread_updates_for_user(
self,
all_thread_updates: ThreadUpdatesMap,
user_id: str,
) -> ThreadUpdatesMap:
"""Process thread updates by filtering for visibility.
Takes raw thread updates from storage and filters them based on whether the
user can see the events. Preserves the ordering of updates within each thread.
Args:
all_thread_updates: Map of thread_id to list of ThreadUpdateInfo objects
user_id: The user ID to filter events for
Returns:
Filtered map of thread_id to list of ThreadUpdateInfo objects, containing
only updates for events the user can see.
"""
# Build a mapping of event_id -> (thread_id, update) for efficient lookup
# during visibility filtering.
event_to_thread_map: dict[str, tuple[str, ThreadUpdateInfo]] = {}
for thread_id, updates in all_thread_updates.items():
for update in updates:
event_to_thread_map[update.event_id] = (thread_id, update)
# Fetch and filter events for visibility
all_events = await self._main_store.get_events_as_list(
event_to_thread_map.keys()
)
filtered_events = await filter_events_for_client(
self._storage_controllers, user_id, all_events
)
# Rebuild thread updates from filtered events
filtered_updates: ThreadUpdatesMap = defaultdict(list)
for event in filtered_events:
if event.event_id in event_to_thread_map:
thread_id, update = event_to_thread_map[event.event_id]
filtered_updates[thread_id].append(update)
return filtered_updates
def _build_thread_updates_response(
self,
filtered_updates: ThreadUpdatesMap,
thread_root_event_map: ThreadRootsMap,
aggregations_map: AggregationsMap,
global_prev_batch_token: StreamToken | None,
) -> dict[str, dict[str, ThreadUpdate]]:
"""Build thread update response structure with per-thread prev_batch tokens.
Args:
filtered_updates: Map of thread_root_id to list of ThreadUpdateInfo
thread_root_event_map: Map of thread_root_id to EventBase
aggregations_map: Map of thread_root_id to BundledAggregations
global_prev_batch_token: Global pagination token, or None if no more results
Returns:
Map of room_id to thread_root_id to ThreadUpdate
"""
thread_updates: dict[str, dict[str, ThreadUpdate]] = {}
for thread_root_id, updates in filtered_updates.items():
# We only care about the latest update for the thread
# Updates are already sorted by stream_ordering DESC from the database query,
# and filter_events_for_client preserves order, so updates[0] is guaranteed to be
# the latest event for each thread.
latest_update = updates[0]
room_id = latest_update.room_id
# Generate per-thread prev_batch token if this thread has multiple visible updates
# or if we hit the global limit.
# When we hit the global limit, we generate prev_batch tokens for all threads, even if
# we only saw 1 update for them. This is to cover the case where we only saw
# a single update for a given thread, but the global limit prevents us from
# obtaining other updates which would have otherwise been included in the range.
per_thread_prev_batch = None
if len(updates) > 1 or global_prev_batch_token is not None:
# Create a token pointing to one position before the latest event's stream position.
# This makes it exclusive - /relations with dir=b won't return the latest event again.
# Use StreamToken.START as base (all other streams at 0) since only room position matters.
per_thread_prev_batch = StreamToken.START.copy_and_replace(
StreamKeyType.ROOM,
RoomStreamToken(stream=latest_update.stream_ordering - 1),
)
if room_id not in thread_updates:
thread_updates[room_id] = {}
thread_updates[room_id][thread_root_id] = ThreadUpdate(
thread_root=thread_root_event_map.get(thread_root_id),
prev_batch=per_thread_prev_batch,
bundled_aggregations=aggregations_map.get(thread_root_id),
)
return thread_updates
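To make the exclusive-token arithmetic concrete, a minimal sketch assuming the latest visible update in a thread sits at stream position 42 (value illustrative):
# Points one position before the latest event, so /relations with dir=b
# starts strictly below 42 and never re-sends the event the client has.
per_thread_prev_batch = StreamToken.START.copy_and_replace(
    StreamKeyType.ROOM, RoomStreamToken(stream=42 - 1)
)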
async def _fetch_thread_updates(
self,
room_ids: frozenset[str],
room_membership_map: Mapping[str, "RoomsForUserType"],
from_token: StreamToken | None,
to_token: StreamToken,
limit: int,
exclude_thread_ids: set[str] | None = None,
) -> tuple[ThreadUpdatesMap, StreamToken | None]:
"""Fetch thread updates across multiple rooms, handling membership states properly.
This method separates rooms based on membership status (LEAVE/BAN vs others)
and queries them appropriately to prevent data leaks. For rooms where the user
has left or been banned, we bound the query to their leave/ban event position.
Args:
room_ids: The set of room IDs to fetch thread updates for
room_membership_map: Map of room_id to RoomsForUserType containing membership info
from_token: Lower bound (exclusive) for the query, or None for no lower bound
to_token: Upper bound for the query (for joined/invited/knocking rooms)
limit: Maximum number of thread updates to return across all rooms
exclude_thread_ids: Optional set of thread IDs to exclude from results
Returns:
A tuple of:
- Map of thread_id to list of ThreadUpdateInfo objects
- Global prev_batch token if there are more results, None otherwise
"""
# Separate rooms based on membership to handle LEAVE/BAN rooms specially
leave_ban_rooms: set[str] = set()
other_rooms: set[str] = set()
for room_id in room_ids:
membership_info = room_membership_map.get(room_id)
if membership_info and membership_info.membership in (
Membership.LEAVE,
Membership.BAN,
):
leave_ban_rooms.add(room_id)
else:
other_rooms.add(room_id)
# Fetch thread updates from storage, handling LEAVE/BAN rooms separately
all_thread_updates: ThreadUpdatesMap = {}
prev_batch_token: StreamToken | None = None
remaining_limit = limit
# Query LEAVE/BAN rooms with bounded to_token to prevent data leaks
if leave_ban_rooms:
for room_id in leave_ban_rooms:
if remaining_limit <= 0:
# We've hit the limit, set prev_batch to indicate more results
prev_batch_token = to_token
break
membership_info = room_membership_map[room_id]
bounded_to_token = membership_info.event_pos.to_room_stream_token()
(
room_thread_updates,
room_prev_batch,
) = await self._main_store.get_thread_updates_for_rooms(
room_ids={room_id},
from_token=from_token.room_key if from_token else None,
to_token=bounded_to_token,
limit=remaining_limit,
exclude_thread_ids=exclude_thread_ids,
)
# Count updates and reduce remaining limit
num_updates = sum(
len(updates) for updates in room_thread_updates.values()
)
remaining_limit -= num_updates
# Merge updates
for thread_id, updates in room_thread_updates.items():
all_thread_updates.setdefault(thread_id, []).extend(updates)
# Merge prev_batch tokens (take the maximum for backward pagination)
if room_prev_batch is not None:
if prev_batch_token is None:
prev_batch_token = room_prev_batch
elif (
room_prev_batch.room_key.stream
> prev_batch_token.room_key.stream
):
prev_batch_token = room_prev_batch
# Query other rooms (joined/invited/knocking) with normal to_token
if other_rooms and remaining_limit > 0:
(
other_thread_updates,
other_prev_batch,
) = await self._main_store.get_thread_updates_for_rooms(
room_ids=other_rooms,
from_token=from_token.room_key if from_token else None,
to_token=to_token.room_key,
limit=remaining_limit,
exclude_thread_ids=exclude_thread_ids,
)
# Merge updates
for thread_id, updates in other_thread_updates.items():
all_thread_updates.setdefault(thread_id, []).extend(updates)
# Merge prev_batch tokens
if other_prev_batch is not None:
if prev_batch_token is None:
prev_batch_token = other_prev_batch
elif (
other_prev_batch.room_key.stream > prev_batch_token.room_key.stream
):
prev_batch_token = other_prev_batch
return all_thread_updates, prev_batch_token
async def get_thread_updates_for_rooms(
self,
room_ids: frozenset[str],
room_membership_map: Mapping[str, "RoomsForUserType"],
user_id: str,
from_token: StreamToken | None,
to_token: StreamToken,
limit: int,
include_roots: bool = False,
exclude_thread_ids: set[str] | None = None,
) -> tuple[dict[str, dict[str, ThreadUpdate]], StreamToken | None]:
"""Get thread updates across multiple rooms with full processing pipeline.
This is the main entry point for fetching thread updates. It handles:
- Fetching updates with membership-based security
- Filtering for visibility
- Optionally fetching thread roots and aggregations
- Building the response structure
Args:
room_ids: The set of room IDs to fetch updates for
room_membership_map: Map of room_id to RoomsForUserType for membership info
user_id: The user requesting the updates
from_token: Lower bound (exclusive) for the query
to_token: Upper bound for the query
limit: Maximum number of updates to return
include_roots: Whether to fetch and include thread root events (default: False)
exclude_thread_ids: Optional set of thread IDs to exclude
Returns:
A tuple of:
- Map of room_id to thread_root_id to ThreadUpdate
- Global prev_batch token if there are more results, None otherwise
"""
# Fetch thread updates with membership handling
all_thread_updates, prev_batch_token = await self._fetch_thread_updates(
room_ids=room_ids,
room_membership_map=room_membership_map,
from_token=from_token,
to_token=to_token,
limit=limit,
exclude_thread_ids=exclude_thread_ids,
)
if not all_thread_updates:
return {}, prev_batch_token
# Filter thread updates for visibility
filtered_updates = await self._filter_thread_updates_for_user(
all_thread_updates, user_id
)
if not filtered_updates:
return {}, prev_batch_token
# Optionally fetch thread root events and their bundled aggregations
thread_root_event_map: ThreadRootsMap = {}
aggregations_map: AggregationsMap = {}
if include_roots:
# Fetch thread root events
thread_root_events = await self._main_store.get_events_as_list(
filtered_updates.keys()
)
thread_root_event_map = {e.event_id: e for e in thread_root_events}
# Fetch bundled aggregations for the thread roots
if thread_root_event_map:
aggregations_map = await self.get_bundled_aggregations(
thread_root_event_map.values(),
user_id,
)
# Build response structure with per-thread prev_batch tokens
thread_updates = self._build_thread_updates_response(
filtered_updates=filtered_updates,
thread_root_event_map=thread_root_event_map,
aggregations_map=aggregations_map,
global_prev_batch_token=prev_batch_token,
)
return thread_updates, prev_batch_token
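A hedged usage sketch of this entry point; the room set, membership map, and token are whatever the caller (the sliding sync extension or the companion servlet) already computed, and the user ID is hypothetical:
updates, prev_batch = await relations_handler.get_thread_updates_for_rooms(
    room_ids=frozenset(joined_room_ids),
    room_membership_map=membership_map,
    user_id="@alice:example.org",
    from_token=None,  # no lower bound: fetch the newest updates
    to_token=now_token,
    limit=100,
    include_roots=True,  # also fetch thread roots and bundled aggregations
)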
@staticmethod
async def serialize_thread_updates(
thread_updates: Mapping[str, Mapping[str, ThreadUpdate]],
prev_batch_token: StreamToken | None,
event_serializer: "EventClientSerializer",
time_now: int,
store: "DataStore",
serialize_options: SerializeEventConfig,
) -> JsonDict:
"""
Serialize thread updates to JSON format.
This helper handles serialization of ThreadUpdate objects for both the
companion endpoint and the sliding sync extension.
Args:
thread_updates: Map of room_id to thread_root_id to ThreadUpdate
prev_batch_token: Global pagination token for fetching more updates
event_serializer: The event serializer to use
time_now: Current time in milliseconds for event serialization
store: Datastore for serializing stream tokens
serialize_options: Serialization config
Returns:
JSON-serializable dict with "updates" and optionally "prev_batch"
"""
updates_dict: JsonDict = {}
for room_id, room_threads in thread_updates.items():
room_updates: JsonDict = {}
for thread_root_id, update in room_threads.items():
update_dict: JsonDict = {}
# Serialize thread_root event if present
if update.thread_root is not None:
bundle_aggs_map = (
{thread_root_id: update.bundled_aggregations}
if update.bundled_aggregations is not None
else None
)
serialized_events = await event_serializer.serialize_events(
[update.thread_root],
time_now,
config=serialize_options,
bundle_aggregations=bundle_aggs_map,
)
if serialized_events:
update_dict["thread_root"] = serialized_events[0]
# Add per-thread prev_batch if present
if update.prev_batch is not None:
update_dict["prev_batch"] = await update.prev_batch.to_string(store)
room_updates[thread_root_id] = update_dict
updates_dict[room_id] = room_updates
result: JsonDict = {"updates": updates_dict}
# Add global prev_batch token if present
if prev_batch_token is not None:
result["prev_batch"] = await prev_batch_token.to_string(store)
return result
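For orientation, the returned structure is roughly the following (IDs and tokens illustrative):
{
    "updates": {
        "!room:example.org": {
            "$thread_root": {
                "thread_root": {"...": "serialized event"},  # when roots requested
                "prev_batch": "<per-thread token>",  # when set on the update
            },
        },
    },
    "prev_batch": "<global token>",  # only when more updates remain
}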
async def get_threads(
self,
requester: Requester,

View File

@@ -305,6 +305,7 @@ class SlidingSyncHandler:
# account data, read receipts, typing indicators, to-device messages, etc).
actual_room_ids=set(relevant_room_map.keys()),
actual_room_response_map=rooms,
room_membership_for_user_at_to_token_map=room_membership_for_user_map,
from_token=from_token,
to_token=to_token,
)

View File

@@ -26,8 +26,16 @@ from typing import (
from typing_extensions import TypeAlias, assert_never
from synapse.api.constants import AccountDataTypes, EduTypes
from synapse.api.constants import (
AccountDataTypes,
EduTypes,
EventContentFields,
MRelatesToFields,
RelationTypes,
)
from synapse.events import EventBase
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.sliding_sync.room_lists import RoomsForUserType
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.receipts import ReceiptInRoom
from synapse.types import (
@@ -73,7 +81,10 @@ class SlidingSyncExtensionHandler:
self.event_sources = hs.get_event_sources()
self.device_handler = hs.get_device_handler()
self.push_rules_handler = hs.get_push_rules_handler()
self.relations_handler = hs.get_relations_handler()
self._storage_controllers = hs.get_storage_controllers()
self._enable_thread_subscriptions = hs.config.experimental.msc4306_enabled
self._enable_threads_ext = hs.config.experimental.msc4360_enabled
@trace
async def get_extensions_response(
@@ -84,6 +95,7 @@ class SlidingSyncExtensionHandler:
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: set[str],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType],
to_token: StreamToken,
from_token: SlidingSyncStreamToken | None,
) -> SlidingSyncResult.Extensions:
@@ -99,6 +111,8 @@ class SlidingSyncExtensionHandler:
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
Sliding Sync response.
room_membership_for_user_at_to_token_map: A map of room ID to the membership
information for the user in the room at the time of `to_token`.
to_token: The latest point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""
@@ -174,6 +188,18 @@ class SlidingSyncExtensionHandler:
from_token=from_token,
)
threads_coro = None
if sync_config.extensions.threads is not None and self._enable_threads_ext:
threads_coro = self.get_threads_extension_response(
sync_config=sync_config,
threads_request=sync_config.extensions.threads,
actual_room_ids=actual_room_ids,
actual_room_response_map=actual_room_response_map,
room_membership_for_user_at_to_token_map=room_membership_for_user_at_to_token_map,
to_token=to_token,
from_token=from_token,
)
(
to_device_response,
e2ee_response,
@@ -181,6 +207,7 @@ class SlidingSyncExtensionHandler:
receipts_response,
typing_response,
thread_subs_response,
threads_response,
) = await gather_optional_coroutines(
to_device_coro,
e2ee_coro,
@@ -188,6 +215,7 @@ class SlidingSyncExtensionHandler:
receipts_coro,
typing_coro,
thread_subs_coro,
threads_coro,
)
return SlidingSyncResult.Extensions(
@@ -197,6 +225,7 @@ class SlidingSyncExtensionHandler:
receipts=receipts_response,
typing=typing_response,
thread_subscriptions=thread_subs_response,
threads=threads_response,
)
def find_relevant_room_ids_for_extension(
@@ -967,3 +996,104 @@ class SlidingSyncExtensionHandler:
unsubscribed=unsubscribed_threads,
prev_batch=prev_batch,
)
def _extract_thread_id_from_event(self, event: EventBase) -> str | None:
"""Extract thread ID from event if it's a thread reply.
Args:
event: The event to check.
Returns:
The thread ID if the event is a thread reply, None otherwise.
"""
relates_to = event.content.get(EventContentFields.RELATIONS)
if isinstance(relates_to, dict):
if relates_to.get(MRelatesToFields.REL_TYPE) == RelationTypes.THREAD:
return relates_to.get(MRelatesToFields.EVENT_ID)
return None
def _find_threads_in_timeline(
self,
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
) -> set[str]:
"""Find all thread IDs that have events in room timelines.
Args:
actual_room_response_map: A map of room ID to room results.
Returns:
A set of thread IDs (thread root event IDs) that appear in the timeline.
"""
threads_in_timeline: set[str] = set()
for room_result in actual_room_response_map.values():
if room_result.timeline_events:
for event in room_result.timeline_events:
thread_id = self._extract_thread_id_from_event(event)
if thread_id:
threads_in_timeline.add(thread_id)
return threads_in_timeline
async def get_threads_extension_response(
self,
sync_config: SlidingSyncConfig,
threads_request: SlidingSyncConfig.Extensions.ThreadsExtension,
actual_room_ids: set[str],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
room_membership_for_user_at_to_token_map: Mapping[str, RoomsForUserType],
to_token: StreamToken,
from_token: SlidingSyncStreamToken | None,
) -> SlidingSyncResult.Extensions.ThreadsExtension | None:
"""Handle Threads extension (MSC4360)
Args:
sync_config: Sync configuration.
threads_request: The threads extension from the request.
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
sliding sync response. Used to determine which threads already have
events in the room timeline.
room_membership_for_user_at_to_token_map: A map of room ID to the membership
information for the user in the room at the time of `to_token`.
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
Returns:
the response (None if empty or threads extension is disabled)
"""
if not threads_request.enabled:
return None
# Identify which threads already have events in the room timelines.
# If include_roots=False, we'll exclude these threads from the DB query
# since the client already sees the thread activity in the timeline.
# If include_roots=True, we fetch all threads regardless, because the client
# wants the thread root events.
threads_to_exclude: set[str] | None = None
if not threads_request.include_roots:
threads_to_exclude = self._find_threads_in_timeline(
actual_room_response_map
)
# Get thread updates using unified helper
user_id = sync_config.user.to_string()
(
thread_updates_response,
prev_batch_token,
) = await self.relations_handler.get_thread_updates_for_rooms(
room_ids=frozenset(actual_room_ids),
room_membership_map=room_membership_for_user_at_to_token_map,
user_id=user_id,
from_token=from_token.stream_token if from_token else None,
to_token=to_token,
limit=threads_request.limit,
include_roots=threads_request.include_roots,
exclude_thread_ids=threads_to_exclude,
)
if not thread_updates_response:
return None
return SlidingSyncResult.Extensions.ThreadsExtension(
updates=thread_updates_response,
prev_batch=prev_batch_token,
)
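For reference, a hedged sketch of the request fragment that enables this extension, assuming the request uses the same io.element.msc4360.threads key the response is serialized under in the REST layer below (field names match ThreadsExtension; values illustrative):
{
    "extensions": {
        "io.element.msc4360.threads": {
            "enabled": True,
            "include_roots": True,  # also return thread root events
            "limit": 50,  # cap on updates per response
        }
    }
}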

View File

@@ -184,7 +184,9 @@ class WorkerLocksHandler:
locks: Collection[WaitingLock | WaitingMultiLock],
) -> None:
for lock in locks:
lock.release_lock()
deferred = lock.deferred
if not deferred.called:
deferred.callback(None)
self._clock.call_later(
0,
@@ -213,12 +215,6 @@ class WaitingLock:
lambda: start_active_span("WaitingLock.lock")
)
def release_lock(self) -> None:
"""Release the lock (by resolving the deferred)"""
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.callback(None)
async def __aenter__(self) -> None:
self._lock_span.__enter__()
@@ -302,12 +298,6 @@ class WaitingMultiLock:
lambda: start_active_span("WaitingLock.lock")
)
def release_lock(self) -> None:
"""Release the lock (by resolving the deferred)"""
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.callback(None)
async def __aenter__(self) -> None:
self._lock_span.__enter__()

View File

@@ -77,11 +77,7 @@ from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_u
from synapse.http.proxyagent import ProxyAgent
from synapse.http.replicationagent import ReplicationAgent
from synapse.http.types import QueryParams
from synapse.logging.context import (
PreserveLoggingContext,
make_deferred_yieldable,
run_in_background,
)
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import ISynapseReactor, StrSequence
@@ -1040,8 +1036,7 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
Report a max size exceed error and disconnect the first time this is called.
"""
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1140,8 +1135,7 @@ class _MultipartParserProtocol(protocol.Protocol):
logger.warning(
"Exception encountered writing file data to stream: %s", e
)
with PreserveLoggingContext():
self.deferred.errback()
self.deferred.errback()
self.file_length += end - start
callbacks: "multipart.MultipartCallbacks" = {
@@ -1153,8 +1147,7 @@ class _MultipartParserProtocol(protocol.Protocol):
self.total_length += len(incoming_data)
if self.max_length is not None and self.total_length >= self.max_length:
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1164,8 +1157,7 @@ class _MultipartParserProtocol(protocol.Protocol):
self.parser.write(incoming_data)
except Exception as e:
logger.warning("Exception writing to multipart parser: %s", e)
with PreserveLoggingContext():
self.deferred.errback()
self.deferred.errback()
return
def connectionLost(self, reason: Failure = connectionDone) -> None:
@@ -1175,11 +1167,9 @@ class _MultipartParserProtocol(protocol.Protocol):
if reason.check(ResponseDone):
self.multipart_response.length = self.file_length
with PreserveLoggingContext():
self.deferred.callback(self.multipart_response)
self.deferred.callback(self.multipart_response)
else:
with PreserveLoggingContext():
self.deferred.errback(reason)
self.deferred.errback(reason)
class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
@@ -1203,8 +1193,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
try:
self.stream.write(data)
except Exception:
with PreserveLoggingContext():
self.deferred.errback()
self.deferred.errback()
return
self.length += len(data)
@@ -1212,8 +1201,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
# connection. dataReceived might be called again if data was received
# in the meantime.
if self.max_size is not None and self.length >= self.max_size:
with PreserveLoggingContext():
self.deferred.errback(BodyExceededMaxSize())
self.deferred.errback(BodyExceededMaxSize())
# Close the connection (forcefully) since all the data will get
# discarded anyway.
assert self.transport is not None
@@ -1225,8 +1213,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
return
if reason.check(ResponseDone):
with PreserveLoggingContext():
self.deferred.callback(self.length)
self.deferred.callback(self.length)
elif reason.check(PotentialDataLoss):
# This applies to requests which don't set `Content-Length` or a
# `Transfer-Encoding` in the response because in this case the end of the
@@ -1235,11 +1222,9 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
# behavior is expected of some servers (like YouTube), let's ignore it.
# Stolen from https://github.com/twisted/treq/pull/49/files
# http://twistedmatrix.com/trac/ticket/4840
with PreserveLoggingContext():
self.deferred.callback(self.length)
self.deferred.callback(self.length)
else:
with PreserveLoggingContext():
self.deferred.errback(reason)
self.deferred.errback(reason)
def read_body_with_max_size(

View File

@@ -41,8 +41,6 @@ from twisted.internet.protocol import ClientFactory, connectionDone
from twisted.python.failure import Failure
from twisted.web import http
from synapse.logging.context import PreserveLoggingContext
logger = logging.getLogger(__name__)
@@ -178,16 +176,14 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None:
logger.debug("Connection to proxy failed: %s", reason)
if not self.on_connection.called:
with PreserveLoggingContext():
self.on_connection.errback(reason)
self.on_connection.errback(reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
logger.debug("Connection to proxy lost: %s", reason)
if not self.on_connection.called:
with PreserveLoggingContext():
self.on_connection.errback(reason)
self.on_connection.errback(reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionLost(connector, reason)
@@ -242,16 +238,14 @@ class HTTPConnectProtocol(protocol.Protocol):
self.http_setup_client.connectionLost(reason)
if not self.connected_deferred.called:
with PreserveLoggingContext():
self.connected_deferred.errback(reason)
self.connected_deferred.errback(reason)
def proxyConnected(self, _: Union[None, "defer.Deferred[None]"]) -> None:
self.wrapped_connection_started = True
assert self.transport is not None
self.wrapped_protocol.makeConnection(self.transport)
with PreserveLoggingContext():
self.connected_deferred.callback(self.wrapped_protocol)
self.connected_deferred.callback(self.wrapped_protocol)
# Get any pending data from the http buf and forward it to the original protocol
buf = self.http_setup_client.clearLineBuffer()
@@ -309,8 +303,7 @@ class HTTPConnectSetupClient(http.HTTPClient):
def handleEndHeaders(self) -> None:
logger.debug("End Headers")
with PreserveLoggingContext():
self.on_connected.callback(None)
self.on_connected.callback(None)
def handleResponse(self, body: bytes) -> None:
pass

View File

@@ -619,24 +619,19 @@ class LoggingContextFilter(logging.Filter):
True to include the record in the log output.
"""
context = current_context()
# type-ignore: `context` should never be `None`, but if it somehow ends up
# being, then we end up in a death spiral of infinite loops, so let's check, for
# robustness' sake.
record.request = self._default_request
# Avoid overwriting an existing `server_name` on the record. This is running in
# the context of a global log record filter so there may be 3rd-party code that
# adds their own `server_name` and we don't want to interfere with that
# (clobber).
if not hasattr(record, "server_name"):
record.server_name = "unknown_server_from_no_logcontext"
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
# robustness' sake.
#
# Add some default values to avoid log formatting errors.
if context is None:
record.request = self._default_request # type: ignore[unreachable]
# Avoid overwriting an existing `server_name` on the record. This is running in
# the context of a global log record filter so there may be 3rd-party code that
# adds their own `server_name` and we don't want to interfere with that
# (clobber).
if not hasattr(record, "server_name"):
record.server_name = "unknown_server_from_no_logcontext"
# Otherwise, in the normal, expected case, fill in the log record attributes
# from the logcontext.
else:
if context is not None:
def safe_set(attr: str, value: Any) -> None:
"""

View File

@@ -45,7 +45,6 @@ from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import (
PreserveLoggingContext,
defer_to_threadpool,
make_deferred_yieldable,
run_in_background,
@@ -754,10 +753,9 @@ class ThreadedFileSender:
self.wakeup_event.set()
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.errback(
ConsumerRequestedStopError("Consumer asked us to stop producing")
)
self.deferred.errback(
ConsumerRequestedStopError("Consumer asked us to stop producing")
)
async def start_read_loop(self) -> None:
"""This is the loop that drives reading/writing"""
@@ -811,8 +809,7 @@ class ThreadedFileSender:
self.consumer = None
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.errback(failure)
self.deferred.errback(failure)
def _finish(self) -> None:
"""Called when we have finished writing (either on success or
@@ -826,5 +823,4 @@ class ThreadedFileSender:
self.consumer = None
if not self.deferred.called:
with PreserveLoggingContext():
self.deferred.callback(None)
self.deferred.callback(None)

View File

@@ -47,11 +47,14 @@ class UpdateDelayedEventServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
try:
action = str(body["action"])
@@ -72,65 +75,11 @@ class UpdateDelayedEventServlet(RestServlet):
)
if enum_action == _UpdateDelayedEventAction.CANCEL:
await self.delayed_events_handler.cancel(request, delay_id)
await self.delayed_events_handler.cancel(requester, delay_id)
elif enum_action == _UpdateDelayedEventAction.RESTART:
await self.delayed_events_handler.restart(request, delay_id)
await self.delayed_events_handler.restart(requester, delay_id)
elif enum_action == _UpdateDelayedEventAction.SEND:
await self.delayed_events_handler.send(request, delay_id)
return 200, {}
class CancelDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/cancel$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.cancel(request, delay_id)
return 200, {}
class RestartDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/restart$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.restart(request, delay_id)
return 200, {}
class SendDelayedEventServlet(RestServlet):
PATTERNS = client_patterns(
r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)/send$",
releases=(),
)
CATEGORY = "Delayed event management requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.delayed_events_handler = hs.get_delayed_events_handler()
async def on_POST(
self, request: SynapseRequest, delay_id: str
) -> tuple[int, JsonDict]:
await self.delayed_events_handler.send(request, delay_id)
await self.delayed_events_handler.send(requester, delay_id)
return 200, {}
@@ -159,7 +108,4 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
# The following can't currently be instantiated on workers.
if hs.config.worker.worker_app is None:
UpdateDelayedEventServlet(hs).register(http_server)
CancelDelayedEventServlet(hs).register(http_server)
RestartDelayedEventServlet(hs).register(http_server)
SendDelayedEventServlet(hs).register(http_server)
DelayedEventsServlet(hs).register(http_server)
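With the action-specific servlets gone, all three operations go through the one endpoint; a hedged sketch of the call shape (delay ID hypothetical, path following MSC4140's unstable prefix):
# POST /_matrix/client/unstable/org.matrix.msc4140/delayed_events/<delay_id>
{"action": "cancel"}  # or "restart", or "send"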

View File

@@ -20,17 +20,33 @@
import logging
import re
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Annotated
from synapse.api.constants import Direction
from pydantic import StrictBool, StrictStr
from pydantic.types import StringConstraints
from synapse.api.constants import Direction, Membership
from synapse.api.errors import SynapseError
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.relations import ThreadsListInclude
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.servlet import (
RestServlet,
parse_and_validate_json_object_from_request,
parse_boolean,
parse_integer,
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.relations import ThreadsNextBatch
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict
from synapse.streams.config import (
PaginationConfig,
extract_stream_token_from_pagination_token,
)
from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken, UserID
from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig
from synapse.types.rest.client import RequestBodyModel, SlidingSyncBody
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -38,6 +54,39 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class ThreadUpdatesBody(RequestBodyModel):
"""
Thread updates companion endpoint request body (MSC4360).
Allows paginating thread updates using the same room selection as a sliding sync
request. This enables clients to fetch thread updates for the same set of rooms
that were included in their sliding sync response.
Attributes:
lists: Sliding window API lists, using the same structure as SlidingSyncBody.lists.
If provided along with room_subscriptions, the union of rooms from both will
be used.
room_subscriptions: Room subscription API rooms, using the same structure as
SlidingSyncBody.room_subscriptions. If provided along with lists, the union
of rooms from both will be used.
include_roots: Whether to include the thread root events in the response.
Defaults to False.
If neither lists nor room_subscriptions are provided, thread updates from all
joined rooms are returned.
"""
lists: (
dict[
Annotated[str, StringConstraints(max_length=64, strict=True)],
SlidingSyncBody.SlidingSyncList,
]
| None
) = None
room_subscriptions: dict[StrictStr, SlidingSyncBody.RoomSubscription] | None = None
include_roots: StrictBool = False
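A hedged example body for this endpoint, reusing the sliding sync room-selection shapes (list name, range, and room ID illustrative):
{
    "lists": {
        "all": {
            "ranges": [[0, 99]],
            "required_state": [],
            "timeline_limit": 0,
        }
    },
    "room_subscriptions": {
        "!project:example.org": {"required_state": [], "timeline_limit": 0}
    },
    "include_roots": True,
}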
class RelationPaginationServlet(RestServlet):
"""API to paginate relations on an event by topological ordering, optionally
filtered by relation type and event type.
@@ -133,6 +182,167 @@ class ThreadsServlet(RestServlet):
return 200, result
class ThreadUpdatesServlet(RestServlet):
"""
Companion endpoint to the Sliding Sync threads extension (MSC4360).
Allows clients to bulk fetch thread updates across all joined rooms.
"""
PATTERNS = client_patterns(
"/io.element.msc4360/thread_updates$",
unstable=True,
releases=(),
)
CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.relations_handler = hs.get_relations_handler()
self.event_serializer = hs.get_event_client_serializer()
self._storage_controllers = hs.get_storage_controllers()
self.sliding_sync_handler = hs.get_sliding_sync_handler()
async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
# Parse request body
body = parse_and_validate_json_object_from_request(request, ThreadUpdatesBody)
# Parse query parameters
dir_str = parse_string(request, "dir", default="b")
if dir_str != "b":
raise SynapseError(
400,
"The 'dir' parameter must be 'b' (backward). Forward pagination is not supported.",
)
limit = parse_integer(request, "limit", default=100)
if limit <= 0:
raise SynapseError(400, "The 'limit' parameter must be positive.")
from_token_str = parse_string(request, "from")
to_token_str = parse_string(request, "to")
# Parse pagination tokens
from_token: StreamToken | None = None
to_token: StreamToken | None = None
if from_token_str:
try:
stream_token_str = extract_stream_token_from_pagination_token(
from_token_str
)
from_token = await StreamToken.from_string(self.store, stream_token_str)
except Exception as e:
logger.exception("Error parsing 'from' token: %s", from_token_str)
raise SynapseError(400, "'from' parameter is invalid") from e
if to_token_str:
try:
stream_token_str = extract_stream_token_from_pagination_token(
to_token_str
)
to_token = await StreamToken.from_string(self.store, stream_token_str)
except Exception:
raise SynapseError(400, "'to' parameter is invalid")
# Get the list of rooms to fetch thread updates for
user_id = requester.user.to_string()
user = UserID.from_string(user_id)
# Get the current stream token for membership lookup
if from_token is None:
max_stream_ordering = self.store.get_room_max_stream_ordering()
current_token = StreamToken.START.copy_and_replace(
StreamKeyType.ROOM, RoomStreamToken(stream=max_stream_ordering)
)
else:
current_token = from_token
# Get room membership information to properly handle LEAVE/BAN rooms
(
room_membership_for_user_at_to_token_map,
_,
_,
) = await self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token(
user=user,
to_token=current_token,
from_token=None,
)
# Determine which rooms to fetch updates for based on lists/room_subscriptions
if body.lists is not None or body.room_subscriptions is not None:
# Use sliding sync room selection logic
sync_config = SlidingSyncConfig(
user=user,
requester=requester,
lists=body.lists,
room_subscriptions=body.room_subscriptions,
)
# Use the sliding sync room list handler to get the same set of rooms
interested_rooms = (
await self.sliding_sync_handler.room_lists.compute_interested_rooms(
sync_config=sync_config,
previous_connection_state=PerConnectionState(),
to_token=current_token,
from_token=None,
)
)
room_ids = frozenset(interested_rooms.relevant_room_map.keys())
else:
# No lists or room_subscriptions, use only joined rooms
room_ids = frozenset(
room_id
for room_id, membership_info in room_membership_for_user_at_to_token_map.items()
if membership_info.membership == Membership.JOIN
)
# Get thread updates using unified helper
(
thread_updates,
prev_batch_token,
) = await self.relations_handler.get_thread_updates_for_rooms(
room_ids=room_ids,
room_membership_map=room_membership_for_user_at_to_token_map,
user_id=user_id,
from_token=to_token,
to_token=from_token if from_token else current_token,
limit=limit,
include_roots=body.include_roots,
)
if not thread_updates:
return 200, {"chunk": {}}
# Serialize thread updates using shared helper
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(requester=requester)
serialized = await self.relations_handler.serialize_thread_updates(
thread_updates=thread_updates,
prev_batch_token=prev_batch_token,
event_serializer=self.event_serializer,
time_now=time_now,
store=self.store,
serialize_options=serialize_options,
)
# Build response with "chunk" wrapper and "next_batch" key
# (companion endpoint uses different key names than sliding sync)
response: JsonDict = {"chunk": serialized["updates"]}
if "prev_batch" in serialized:
response["next_batch"] = serialized["prev_batch"]
return 200, response
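The success response then looks roughly like this (IDs and tokens illustrative):
{
    "chunk": {
        "!project:example.org": {
            "$thread_root": {
                "thread_root": {"...": "serialized event"},  # when include_roots=True
                "prev_batch": "<per-thread token>",
            }
        }
    },
    "next_batch": "<token>",  # present only when more updates remain
}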
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RelationPaginationServlet(hs).register(http_server)
ThreadsServlet(hs).register(http_server)
if hs.config.experimental.msc4360_enabled:
ThreadUpdatesServlet(hs).register(http_server)

View File

@@ -31,11 +31,13 @@ from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.ratelimiting import Ratelimiter
from synapse.events.utils import (
EventClientSerializer,
SerializeEventConfig,
format_event_for_client_v2_without_room_id,
format_event_raw,
)
from synapse.handlers.presence import format_user_presence_state
from synapse.handlers.relations import RelationsHandler
from synapse.handlers.sliding_sync import SlidingSyncConfig, SlidingSyncResult
from synapse.handlers.sync import (
ArchivedSyncResult,
@@ -56,6 +58,7 @@ from synapse.http.servlet import (
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.storage.databases.main import DataStore
from synapse.types import JsonDict, Requester, SlidingSyncStreamToken, StreamToken
from synapse.types.rest.client import SlidingSyncBody
from synapse.util.caches.lrucache import LruCache
@@ -646,6 +649,7 @@ class SlidingSyncRestServlet(RestServlet):
- receipts (MSC3960)
- account data (MSC3959)
- thread subscriptions (MSC4308)
- threads (MSC4360)
Request query parameters:
timeout: How long to wait for new events in milliseconds.
@@ -849,7 +853,10 @@ class SlidingSyncRestServlet(RestServlet):
logger.info("Client has disconnected; not serializing response.")
return 200, {}
response_content = await self.encode_response(requester, sliding_sync_results)
time_now = self.clock.time_msec()
response_content = await self.encode_response(
requester, sliding_sync_results, time_now
)
return 200, response_content
@@ -858,6 +865,7 @@ class SlidingSyncRestServlet(RestServlet):
self,
requester: Requester,
sliding_sync_result: SlidingSyncResult,
time_now: int,
) -> JsonDict:
response: JsonDict = defaultdict(dict)
@@ -866,10 +874,10 @@ class SlidingSyncRestServlet(RestServlet):
if serialized_lists:
response["lists"] = serialized_lists
response["rooms"] = await self.encode_rooms(
requester, sliding_sync_result.rooms
requester, sliding_sync_result.rooms, time_now
)
response["extensions"] = await self.encode_extensions(
requester, sliding_sync_result.extensions
requester, sliding_sync_result.extensions, time_now
)
return response
@@ -901,9 +909,8 @@ class SlidingSyncRestServlet(RestServlet):
self,
requester: Requester,
rooms: dict[str, SlidingSyncResult.RoomResult],
time_now: int,
) -> JsonDict:
time_now = self.clock.time_msec()
serialize_options = SerializeEventConfig(
event_format=format_event_for_client_v2_without_room_id,
requester=requester,
@@ -1019,7 +1026,10 @@ class SlidingSyncRestServlet(RestServlet):
@trace_with_opname("sliding_sync.encode_extensions")
async def encode_extensions(
self, requester: Requester, extensions: SlidingSyncResult.Extensions
self,
requester: Requester,
extensions: SlidingSyncResult.Extensions,
time_now: int,
) -> JsonDict:
serialized_extensions: JsonDict = {}
@@ -1089,6 +1099,18 @@ class SlidingSyncRestServlet(RestServlet):
_serialise_thread_subscriptions(extensions.thread_subscriptions)
)
# excludes both None and falsy `threads`
if extensions.threads:
serialized_extensions[
"io.element.msc4360.threads"
] = await _serialise_threads(
self.event_serializer,
time_now,
extensions.threads,
self.store,
requester,
)
return serialized_extensions
@@ -1125,6 +1147,52 @@ def _serialise_thread_subscriptions(
return out
async def _serialise_threads(
event_serializer: EventClientSerializer,
time_now: int,
threads: SlidingSyncResult.Extensions.ThreadsExtension,
store: "DataStore",
requester: Requester,
) -> JsonDict:
"""
Serialize the threads extension response for sliding sync.
Args:
event_serializer: The event serializer to use for serializing thread root events.
time_now: The current time in milliseconds, used for event serialization.
threads: The threads extension data containing thread updates and pagination tokens.
store: The datastore, needed for serializing stream tokens.
requester: The user making the request, used for transaction_id inclusion.
Returns:
    A JSON-serializable dict containing:
    - "updates": A nested dict mapping room_id -> thread_root_id -> thread update.
      Each thread update may contain:
      - "thread_root": The serialized thread root event (if include_roots was True),
        with bundled aggregations including the latest_event in unsigned.m.relations.m.thread.
      - "prev_batch": A pagination token for fetching older events in the thread.
    - "prev_batch": A top-level pagination token for fetching older thread updates
      (if available).
"""
if not threads.updates:
out: JsonDict = {}
if threads.prev_batch:
out["prev_batch"] = await threads.prev_batch.to_string(store)
return out
# Create serialization config to include transaction_id for requester's events
serialize_options = SerializeEventConfig(requester=requester)
# Use shared serialization helper (static method)
return await RelationsHandler.serialize_thread_updates(
thread_updates=threads.updates,
prev_batch_token=threads.prev_batch,
event_serializer=event_serializer,
time_now=time_now,
store=store,
serialize_options=serialize_options,
)
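For contrast with the companion endpoint above, a minimal sketch (illustrative tokens) of what `_serialise_threads` produces inside the sliding sync response:

# Hypothetical value stored under serialized_extensions["io.element.msc4360.threads"].
threads_extension = {
    "updates": {
        "!room:example.org": {
            "$thread_root_id": {"prev_batch": "s42_0"},
        },
    },
    "prev_batch": "s40_0",  # top-level token for older thread updates
}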
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)

View File

@@ -322,7 +322,7 @@ class LoggingTransaction:
self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs
) -> None:
"""Call the given callback on the main twisted thread after the transaction has
finished successfully.
finished.
Mostly used to invalidate the caches on the correct thread.
@@ -343,7 +343,7 @@ class LoggingTransaction:
self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
) -> None:
"""Call the given asynchronous callback on the main twisted thread after
the transaction has finished successfully (but before those added in `call_after`).
the transaction has finished (but before those added in `call_after`).
Mostly used to invalidate remote caches after transactions.

View File

@@ -13,26 +13,18 @@
#
import logging
from typing import TYPE_CHECKING, NewType
from typing import NewType
import attr
from synapse.api.errors import NotFoundError
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
StoreError,
)
from synapse.storage.database import LoggingTransaction, StoreError
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomID
from synapse.util import stringutils
from synapse.util.json import json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -63,27 +55,6 @@ class DelayedEventDetails(EventDetails):
class DelayedEventsStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
# Set delayed events to be uniquely identifiable by their delay_id.
# In practice, delay_ids are already unique because they are generated
# from cryptographically strong random strings.
# Therefore, adding this constraint is not expected to ever fail,
# despite the current pkey technically allowing non-unique delay_ids.
self.db_pool.updates.register_background_index_update(
update_name="delayed_events_idx",
index_name="delayed_events_idx",
table="delayed_events",
columns=("delay_id",),
unique=True,
)
async def get_delayed_events_stream_pos(self) -> int:
"""
Gets the stream position of the background process to watch for state events
@@ -163,7 +134,9 @@ class DelayedEventsStore(SQLBaseStore):
async def restart_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
current_ts: Timestamp,
) -> Timestamp:
"""
@@ -172,6 +145,7 @@ class DelayedEventsStore(SQLBaseStore):
Args:
delay_id: The ID of the delayed event to restart.
user_localpart: The localpart of the delayed event's owner.
current_ts: The current time, which will be used to calculate the new send time.
Returns: The send time of the next delayed event to be sent,
@@ -189,11 +163,13 @@ class DelayedEventsStore(SQLBaseStore):
"""
UPDATE delayed_events
SET send_ts = ? + delay
WHERE delay_id = ? AND NOT is_processed
WHERE delay_id = ? AND user_localpart = ?
AND NOT is_processed
""",
(
current_ts,
delay_id,
user_localpart,
),
)
if txn.rowcount == 0:
@@ -343,15 +319,21 @@ class DelayedEventsStore(SQLBaseStore):
async def process_target_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
) -> tuple[
DelayedEventDetails,
EventDetails,
Timestamp | None,
]:
"""
Marks for processing the matching delayed event, regardless of its timeout time,
as long as it has not already been marked as such.
Args:
delay_id: The ID of the delayed event to process.
user_localpart: The localpart of the delayed event's owner.
Returns: The details of the matching delayed event,
and the send time of the next delayed event to be sent, if any.
@@ -362,38 +344,39 @@ class DelayedEventsStore(SQLBaseStore):
def process_target_delayed_event_txn(
txn: LoggingTransaction,
) -> tuple[
DelayedEventDetails,
EventDetails,
Timestamp | None,
]:
txn.execute(
"""
UPDATE delayed_events
SET is_processed = TRUE
WHERE delay_id = ? AND NOT is_processed
WHERE delay_id = ? AND user_localpart = ?
AND NOT is_processed
RETURNING
room_id,
event_type,
state_key,
origin_server_ts,
content,
device_id,
user_localpart
device_id
""",
(delay_id,),
(
delay_id,
user_localpart,
),
)
row = txn.fetchone()
if row is None:
raise NotFoundError("Delayed event not found")
event = DelayedEventDetails(
event = EventDetails(
RoomID.from_string(row[0]),
EventType(row[1]),
StateKey(row[2]) if row[2] is not None else None,
Timestamp(row[3]) if row[3] is not None else None,
db_to_json(row[4]),
DeviceID(row[5]) if row[5] is not None else None,
DelayID(delay_id),
UserLocalpart(row[6]),
)
return event, self._get_next_delayed_event_send_ts_txn(txn)
@@ -402,10 +385,19 @@ class DelayedEventsStore(SQLBaseStore):
"process_target_delayed_event", process_target_delayed_event_txn
)
async def cancel_delayed_event(self, delay_id: str) -> Timestamp | None:
async def cancel_delayed_event(
self,
*,
delay_id: str,
user_localpart: str,
) -> Timestamp | None:
"""
Cancels the matching delayed event, i.e. removes it as long as it hasn't been processed.
Args:
delay_id: The ID of the delayed event to cancel.
user_localpart: The localpart of the delayed event's owner.
Returns: The send time of the next delayed event to be sent, if any.
Raises:
@@ -421,6 +413,7 @@ class DelayedEventsStore(SQLBaseStore):
table="delayed_events",
keyvalues={
"delay_id": delay_id,
"user_localpart": user_localpart,
"is_processed": False,
},
)
@@ -480,7 +473,11 @@ class DelayedEventsStore(SQLBaseStore):
"cancel_delayed_state_events", cancel_delayed_state_events_txn
)
async def delete_processed_delayed_event(self, delay_id: DelayID) -> None:
async def delete_processed_delayed_event(
self,
delay_id: DelayID,
user_localpart: UserLocalpart,
) -> None:
"""
Delete the matching delayed event, as long as it has been marked as processed.
@@ -491,6 +488,7 @@ class DelayedEventsStore(SQLBaseStore):
table="delayed_events",
keyvalues={
"delay_id": delay_id,
"user_localpart": user_localpart,
"is_processed": True,
},
desc="delete_processed_delayed_event",
@@ -556,7 +554,7 @@ def _generate_delay_id() -> DelayID:
# We use the following format for delay IDs:
# syd_<random string>
# They are not scoped to user localparts, but the random string
# is expected to be sufficiently random to be globally unique.
# They are scoped to user localparts, so it is possible for
# the same ID to exist for multiple users.
return DelayID(f"syd_{stringutils.random_string(20)}")
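A minimal standalone sketch (not Synapse code) of what this scoping means in practice: delay IDs are only unique per user, so every lookup must key on the (user_localpart, delay_id) pair.

# Hypothetical in-memory model of per-user delay ID scoping.
delayed_events: dict[tuple[str, str], dict] = {}

def cancel_delayed(user_localpart: str, delay_id: str) -> None:
    # The same delay_id may exist for several users; the composite key
    # prevents one user from cancelling another user's delayed event.
    delayed_events.pop((user_localpart, delay_id), None)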

View File

@@ -74,6 +74,7 @@ from synapse.types import (
MutableStateMap,
StateMap,
StrCollection,
get_domain_from_id,
)
from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES
from synapse.types.state import StateFilter
@@ -95,28 +96,9 @@ persist_event_counter = Counter(
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
labelnames=[
"type", # The event type or "*other*" for types we don't track
"origin_type",
SERVER_NAME_LABEL,
],
labelnames=["type", "origin_type", "origin_entity", SERVER_NAME_LABEL],
)
# Event types that we track in the `events_counter` metric above.
#
# This list is chosen to balance tracking the most common event types that are
# useful to monitor (and are likely to spike), while keeping the cardinality of
# the metric low enough to avoid wasted resources.
TRACKED_EVENT_TYPES = {
EventTypes.Message,
EventTypes.Encrypted,
EventTypes.Member,
EventTypes.ThirdPartyInvite,
EventTypes.Redaction,
EventTypes.Create,
EventTypes.Tombstone,
}
# State event type/key pairs that we need to gather to fill in the
# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
SLIDING_SYNC_RELEVANT_STATE_SET = (
@@ -392,21 +374,19 @@ class PersistEventsStore:
for event, context in events_and_contexts:
if context.app_service:
origin_type = "application_service"
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
# We only track a subset of event types, to avoid high
# cardinality in the metrics.
metrics_event_type = (
event.type if event.type in TRACKED_EVENT_TYPES else "*other*"
)
origin_entity = get_domain_from_id(event.sender)
event_counter.labels(
type=metrics_event_type,
type=event.type,
origin_type=origin_type,
origin_entity=origin_entity,
**{SERVER_NAME_LABEL: self.server_name},
).inc()
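A self-contained sketch of the labelled-counter pattern used above, with a hypothetical metric name (the real metric also carries a server-name label):

from prometheus_client import Counter

events_persisted = Counter(
    "example_events_persisted",
    "Events persisted, by type and origin",
    labelnames=["type", "origin_type", "origin_entity"],
)
# Increment one (type, origin_type, origin_entity) combination.
events_persisted.labels(
    type="m.room.message", origin_type="remote", origin_entity="example.org"
).inc()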

View File

@@ -1600,21 +1600,18 @@ class EventsWorkerStore(SQLBaseStore):
if d:
d.redactions.append(redacter)
# check for MSC4293 redactions
# check for MSC4293 redactions
to_check = []
events: list[_EventRow] = []
for e in evs:
try:
event = event_dict.get(e)
if not event:
continue
events.append(event)
event_json = json.loads(event.json)
room_id = event_json.get("room_id")
user_id = event_json.get("sender")
to_check.append((room_id, user_id))
except Exception as exc:
raise InvalidEventError(f"Invalid event {event_id}") from exc
event = event_dict.get(e)
if not event:
continue
events.append(event)
event_json = json.loads(event.json)
room_id = event_json.get("room_id")
user_id = event_json.get("sender")
to_check.append((room_id, user_id))
# likely that some of these events may be for the same room/user combo, in
# which case we don't need to do redundant queries

View File

@@ -239,16 +239,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
txn.execute("SELECT event_id, should_delete FROM events_to_purge")
event_rows = txn.fetchall()
if len(event_rows) == 0:
logger.info("[purge] no events found to purge")
# For the sake of cleanliness: drop the temp table.
# This will commit the txn in sqlite, so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
# no referenced state groups
return set()
logger.info(
"[purge] found %i events before cutoff, of which %i can be deleted",
len(event_rows),

View File

@@ -19,6 +19,7 @@
#
import logging
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Collection,
@@ -40,13 +41,19 @@ from synapse.storage.database import (
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.stream import (
generate_next_token,
generate_pagination_bounds,
generate_pagination_where_clause,
)
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, StreamKeyType, StreamToken
from synapse.types import (
JsonDict,
RoomStreamToken,
StreamKeyType,
StreamToken,
)
from synapse.util.caches.descriptors import cached, cachedList
if TYPE_CHECKING:
@@ -88,7 +95,23 @@ class _RelatedEvent:
sender: str
class RelationsWorkerStore(SQLBaseStore):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThreadUpdateInfo:
"""
Information about a thread update for the sliding sync threads extension.
Attributes:
event_id: The event ID of the event in the thread.
room_id: The room ID where this thread exists.
stream_ordering: The stream ordering of this event.
"""
event_id: str
room_id: str
stream_ordering: int
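Each thread reply event yields one of these records, e.g. (illustrative values):

# Hypothetical construction of a ThreadUpdateInfo for a single thread reply.
update = ThreadUpdateInfo(
    event_id="$reply_event",
    room_id="!room:example.org",
    stream_ordering=42,
)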
class RelationsWorkerStore(EventsWorkerStore, SQLBaseStore):
def __init__(
self,
database: DatabasePool,
@@ -584,14 +607,18 @@ class RelationsWorkerStore(SQLBaseStore):
"get_applicable_edits", _get_applicable_edits_txn
)
edits = await self.get_events(edit_ids.values()) # type: ignore[attr-defined]
edits = await self.get_events(edit_ids.values())
# Map to the original event IDs to the edit events.
#
# There might not be an edit event due to there being no edits or
# due to the event not being known, either case is treated the same.
return {
original_event_id: edits.get(edit_ids.get(original_event_id))
original_event_id: (
edits.get(edit_id)
if (edit_id := edit_ids.get(original_event_id))
else None
)
for original_event_id in event_ids
}
@@ -699,7 +726,7 @@ class RelationsWorkerStore(SQLBaseStore):
"get_thread_summaries", _get_thread_summaries_txn
)
latest_events = await self.get_events(latest_event_ids.values()) # type: ignore[attr-defined]
latest_events = await self.get_events(latest_event_ids.values())
# Map to the event IDs to the thread summary.
#
@@ -1111,6 +1138,148 @@ class RelationsWorkerStore(SQLBaseStore):
"get_related_thread_id", _get_related_thread_id
)
async def get_thread_updates_for_rooms(
self,
*,
room_ids: Collection[str],
from_token: RoomStreamToken | None = None,
to_token: RoomStreamToken | None = None,
limit: int = 5,
exclude_thread_ids: Collection[str] | None = None,
) -> tuple[dict[str, list[ThreadUpdateInfo]], StreamToken | None]:
"""Get a list of updated threads, ordered by stream ordering of their
latest reply, filtered to only include threads in rooms where the user
is currently joined.
Args:
room_ids: The room IDs to fetch thread updates for.
from_token: The lower bound (exclusive) for thread updates. If None,
fetch from the start of the room timeline.
to_token: The upper bound (inclusive) for thread updates. If None,
fetch up to the current position in the room timeline.
limit: Maximum number of thread updates to return.
exclude_thread_ids: Optional collection of thread root event IDs to exclude
from the results. Useful for filtering out threads already visible
in the room timeline.
Returns:
    A tuple of:
    - A dict mapping thread_id to a list of ThreadUpdateInfo objects,
      ordered by stream_ordering descending (most recent first).
    - A prev_batch StreamToken (exclusive) if there are more results
      available, None otherwise.
"""
# Ensure bad limits aren't being passed in.
assert limit > 0
if len(room_ids) == 0:
return {}, None
def _get_thread_updates_for_user_txn(
txn: LoggingTransaction,
) -> tuple[list[tuple[str, str, str, int]], int | None]:
room_clause, room_id_values = make_in_list_sql_clause(
txn.database_engine, "e.room_id", room_ids
)
# Generate the pagination clause, if necessary.
pagination_clause = ""
pagination_args: list[str] = []
if from_token:
from_bound = from_token.stream
pagination_clause += " AND stream_ordering > ?"
pagination_args.append(str(from_bound))
if to_token:
to_bound = to_token.stream
pagination_clause += " AND stream_ordering <= ?"
pagination_args.append(str(to_bound))
# Generate the exclusion clause for thread IDs, if necessary.
exclusion_clause = ""
exclusion_args: list[str] = []
if exclude_thread_ids:
exclusion_clause, exclusion_args = make_in_list_sql_clause(
txn.database_engine,
"er.relates_to_id",
exclude_thread_ids,
negative=True,
)
exclusion_clause = f" AND {exclusion_clause}"
# TODO: improve the fact that multiple hits for the same thread means we
# won't get as many overall updates for the sss response
# Find any thread events between the stream ordering bounds.
sql = f"""
SELECT e.event_id, er.relates_to_id, e.room_id, e.stream_ordering
FROM event_relations AS er
INNER JOIN events AS e ON er.event_id = e.event_id
WHERE er.relation_type = '{RelationTypes.THREAD}'
AND {room_clause}
{exclusion_clause}
{pagination_clause}
ORDER BY stream_ordering DESC
LIMIT ?
"""
# Fetch `limit + 1` rows as a way to detect if there are more results beyond
# what we're returning. If we get exactly `limit + 1` rows back, we know there
# are more results available and we can set `next_token`. We only return the
# first `limit` rows to the caller. This avoids needing a separate COUNT query.
txn.execute(
sql,
(
*room_id_values,
*exclusion_args,
*pagination_args,
limit + 1,
),
)
# SQL returns: event_id, thread_id, room_id, stream_ordering
rows = cast(list[tuple[str, str, str, int]], txn.fetchall())
# If there are more events, generate the next pagination key from the
# last thread which will be returned.
next_token = None
if len(rows) > limit:
# Set the next_token to be the second last row in the result set since
# that will be the last row we return from this function.
# This works as an exclusive bound that can be backpaginated from.
# Use the stream_ordering field (index 3 in each row)
next_token = rows[-2][3]
return rows[:limit], next_token
thread_infos, next_token_int = await self.db_pool.runInteraction(
"get_thread_updates_for_user", _get_thread_updates_for_user_txn
)
# Convert the next_token int (stream ordering) to a StreamToken.
# Use StreamToken.START as base (all other streams at 0) since only room
# position matters.
# Subtract 1 to make it exclusive - the client can paginate from this point without
# receiving the last thread update that was already returned.
next_token = None
if next_token_int is not None:
next_token = StreamToken.START.copy_and_replace(
StreamKeyType.ROOM, RoomStreamToken(stream=next_token_int - 1)
)
# Build ThreadUpdateInfo objects.
thread_update_infos: dict[str, list[ThreadUpdateInfo]] = defaultdict(list)
for event_id, thread_id, room_id, stream_ordering in thread_infos:
thread_update_infos[thread_id].append(
ThreadUpdateInfo(
event_id=event_id,
room_id=room_id,
stream_ordering=stream_ordering,
)
)
return (thread_update_infos, next_token)
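The `limit + 1` trick above can be sketched in isolation (plain integers stand in for database rows; this is not the Synapse helper itself):

def page(ordered_items: list[int], limit: int) -> tuple[list[int], int | None]:
    assert limit > 0
    fetched = ordered_items[: limit + 1]  # plays the role of LIMIT ?+1 in SQL
    if len(fetched) > limit:
        # More results exist: return only `limit` items, and use the last
        # returned item as the bound to continue paginating from.
        return fetched[:limit], fetched[limit - 1]
    return fetched, None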
class RelationsStore(RelationsWorkerStore):
pass

View File

@@ -99,8 +99,8 @@ class PostgresEngine(
allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
# Are we on a supported PostgreSQL version?
if not allow_outdated_version and self._version < 140000:
raise RuntimeError("Synapse requires PostgreSQL 14 or above.")
if not allow_outdated_version and self._version < 130000:
raise RuntimeError("Synapse requires PostgreSQL 13 or above.")
with db_conn.cursor() as txn:
txn.execute("SHOW SERVER_ENCODING")

View File

@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 93 # remember to update the list below when updating
SCHEMA_VERSION = 92 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -168,15 +168,11 @@ Changes in SCHEMA_VERSION = 91
Changes in SCHEMA_VERSION = 92
- Cleaned up a trigger that was added in #18260 and then reverted.
Changes in SCHEMA_VERSION = 93
- MSC4140: Set delayed events to be uniquely identifiable by their delay ID.
"""
SCHEMA_COMPAT_VERSION = (
# Transitive links are no longer written to `event_auth_chain_links`
# TODO: On the next compat bump, update the primary key of `delayed_events`
84
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat

View File

@@ -0,0 +1,33 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add indexes to improve performance of the thread_updates endpoint and
-- sliding sync threads extension (MSC4360).
-- Index for efficiently finding all events that relate to a specific event
-- (e.g., all replies to a thread root). This is used by the correlated subquery
-- in get_thread_updates_for_user that counts thread updates.
-- Also useful for other relation queries (edits, reactions, etc.).
CREATE INDEX IF NOT EXISTS event_relations_relates_to_id_type
ON event_relations(relates_to_id, relation_type);
-- Index for the /thread_updates endpoint's cross-room query.
-- Allows efficient descending ordering and range filtering of threads
-- by stream_ordering across all rooms.
CREATE INDEX IF NOT EXISTS threads_stream_ordering_desc
ON threads(stream_ordering DESC);
-- Index for the EXISTS clause that filters threads to only joined rooms.
-- Allows efficient lookup of a user's current room memberships.
CREATE INDEX IF NOT EXISTS local_current_membership_user_room
ON local_current_membership(user_id, membership, room_id);

View File

@@ -1,15 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 Element Creations, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9301, 'delayed_events_idx', '{}');

View File

@@ -175,8 +175,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
Uses a Postgres sequence to coordinate ID assignment, but positions of other
writers will only get updated when `advance` is called (by replication).
On SQLite, falls back to a single-writer implementation, which is fine because
Synapse only supports monolith mode when SQLite is the database driver.
Note: Only works with Postgres.
Warning: Streams using this generator start at ID 2, because ID 1 is always assumed
to have been 'seen as persisted'.
@@ -537,16 +536,6 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
def get_next_txn(self, txn: LoggingTransaction) -> int:
"""
Generate an ID for immediate use within a database transaction.
The ID will automatically be marked as finished at the end of the
database transaction, therefore the stream rows MUST be persisted
within the active transaction (MUST NOT be persisted in a later
transaction).
The replication notifier will automatically be notified when the
transaction ends successfully.
Usage:
stream_id = stream_id_gen.get_next_txn(txn)
@@ -584,16 +573,6 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
def get_next_mult_txn(self, txn: LoggingTransaction, n: int) -> list[int]:
"""
Generate multiple IDs for immediate use within a database transaction.
The IDs will automatically be marked as finished at the end of the
database transaction, therefore the stream rows MUST be persisted
within the active transaction (MUST NOT be persisted in a later
transaction).
The replication notifier will automatically be notified when the
transaction ends successfully.
Usage:
stream_id = stream_id_gen.get_next_txn(txn)

View File

@@ -35,6 +35,32 @@ logger = logging.getLogger(__name__)
MAX_LIMIT = 1000
def extract_stream_token_from_pagination_token(token_str: str) -> str:
"""
Extract the StreamToken portion from a pagination token string.
Handles both:
- StreamToken format: "s123_456_..."
- SlidingSyncStreamToken format: "5/s123_456_..." (extracts part after /)
This allows clients using sliding sync to use their pos tokens
with endpoints like /relations and /messages.
Args:
token_str: The token string to parse
Returns:
The StreamToken portion of the token
"""
if "/" in token_str:
# SlidingSyncStreamToken format: "connection_position/stream_token"
# Split and return just the stream_token part
parts = token_str.split("/", 1)
if len(parts) == 2:
return parts[1]
return token_str
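For example (token values illustrative):

# A StreamToken passes through unchanged; a SlidingSyncStreamToken is split
# on the first "/" and only the stream-token part is kept.
assert extract_stream_token_from_pagination_token("s123_456") == "s123_456"
assert extract_stream_token_from_pagination_token("5/s123_456") == "s123_456"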
@attr.s(slots=True, auto_attribs=True)
class PaginationConfig:
"""A configuration object which stores pagination parameters."""
@@ -62,14 +88,20 @@ class PaginationConfig:
if from_tok_str == "END":
from_tok = None # For backwards compat.
elif from_tok_str:
from_tok = await StreamToken.from_string(store, from_tok_str)
stream_token_str = extract_stream_token_from_pagination_token(
from_tok_str
)
from_tok = await StreamToken.from_string(store, stream_token_str)
except Exception:
raise SynapseError(400, "'from' parameter is invalid")
try:
to_tok = None
if to_tok_str:
to_tok = await StreamToken.from_string(store, to_tok_str)
stream_token_str = extract_stream_token_from_pagination_token(
to_tok_str
)
to_tok = await StreamToken.from_string(store, stream_token_str)
except Exception:
raise SynapseError(400, "'to' parameter is invalid")

View File

@@ -35,6 +35,10 @@ from pydantic import ConfigDict
from synapse.api.constants import EventTypes
from synapse.events import EventBase
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations, ThreadUpdate
from synapse.types import (
DeviceListUpdates,
JsonDict,
@@ -388,12 +392,37 @@ class SlidingSyncResult:
or bool(self.prev_batch)
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThreadsExtension:
"""The Threads extension (MSC4360)
Provides thread updates for threads that have new activity across all of the
user's joined rooms within the sync window.
Attributes:
updates: A nested mapping of room_id -> thread_root_id -> ThreadUpdate.
Each ThreadUpdate contains information about a thread that has new activity,
including the thread root event (if requested) and a pagination token
for fetching older events in that specific thread.
prev_batch: A pagination token for fetching more thread updates across all rooms.
If present, indicates there are more thread updates available beyond what
was returned in this response. This token can be used with a future request
to paginate through older thread updates.
"""
updates: Mapping[str, Mapping[str, "ThreadUpdate"]] | None
prev_batch: StreamToken | None
def __bool__(self) -> bool:
return bool(self.updates) or bool(self.prev_batch)
to_device: ToDeviceExtension | None = None
e2ee: E2eeExtension | None = None
account_data: AccountDataExtension | None = None
receipts: ReceiptsExtension | None = None
typing: TypingExtension | None = None
thread_subscriptions: ThreadSubscriptionsExtension | None = None
threads: ThreadsExtension | None = None
def __bool__(self) -> bool:
return bool(
@@ -403,6 +432,7 @@ class SlidingSyncResult:
or self.receipts
or self.typing
or self.thread_subscriptions
or self.threads
)
next_pos: SlidingSyncStreamToken
@@ -852,6 +882,7 @@ class PerConnectionState:
Attributes:
rooms: The status of each room for the events stream.
receipts: The status of each room for the receipts stream.
account_data: The status of each room for the account data stream.
room_configs: Map from room_id to the `RoomSyncConfig` of all
rooms that we have previously sent down.
"""

View File

@@ -383,6 +383,19 @@ class SlidingSyncBody(RequestBodyModel):
enabled: StrictBool | None = False
limit: StrictInt = 100
class ThreadsExtension(RequestBodyModel):
"""The Threads extension (MSC4360)
Attributes:
enabled: Whether the threads extension is enabled.
include_roots: Whether to include thread root events in the extension response.
limit: Maximum number of thread updates to return across all joined rooms.
"""
enabled: StrictBool | None = False
include_roots: StrictBool = False
limit: StrictInt = 100
to_device: ToDeviceExtension | None = None
e2ee: E2eeExtension | None = None
account_data: AccountDataExtension | None = None
@@ -391,6 +404,9 @@ class SlidingSyncBody(RequestBodyModel):
thread_subscriptions: ThreadSubscriptionsExtension | None = Field(
None, alias="io.element.msc4308.thread_subscriptions"
)
threads: ThreadsExtension | None = Field(
None, alias="io.element.msc4360.threads"
)
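A client would opt in with a sliding sync request body along these lines (a sketch; the limit value is illustrative):

# Hypothetical request body fragment enabling the MSC4360 threads extension.
body = {
    "extensions": {
        "io.element.msc4360.threads": {
            "enabled": True,
            "include_roots": True,
            "limit": 50,
        },
    },
}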
conn_id: StrictStr | None = None
lists: (

View File

@@ -340,6 +340,7 @@ T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
T6 = TypeVar("T6")
T7 = TypeVar("T7")
@overload
@@ -469,6 +470,30 @@ async def gather_optional_coroutines(
) -> tuple[T1 | None, T2 | None, T3 | None, T4 | None, T5 | None, T6 | None]: ...
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[
tuple[
Coroutine[Any, Any, T1] | None,
Coroutine[Any, Any, T2] | None,
Coroutine[Any, Any, T3] | None,
Coroutine[Any, Any, T4] | None,
Coroutine[Any, Any, T5] | None,
Coroutine[Any, Any, T6] | None,
Coroutine[Any, Any, T7] | None,
]
],
) -> tuple[
T1 | None,
T2 | None,
T3 | None,
T4 | None,
T5 | None,
T6 | None,
T7 | None,
]: ...
async def gather_optional_coroutines(
*coroutines: Unpack[tuple[Coroutine[Any, Any, T1] | None, ...]],
) -> tuple[T1 | None, ...]:
@@ -813,8 +838,7 @@ def timeout_deferred(
# will have errbacked new_d, but in case it hasn't, errback it now.
if not new_d.called:
with PreserveLoggingContext():
new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
# We don't track these calls since they are short.
delayed_call = clock.call_later(
@@ -841,13 +865,11 @@ def timeout_deferred(
def success_cb(val: _T) -> None:
if not new_d.called:
with PreserveLoggingContext():
new_d.callback(val)
new_d.callback(val)
def failure_cb(val: Failure) -> None:
if not new_d.called:
with PreserveLoggingContext():
new_d.errback(val)
new_d.errback(val)
deferred.addCallbacks(success_cb, failure_cb)
@@ -949,8 +971,7 @@ def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
# propagating. we then `unpause` it once the wrapped deferred completes, to
# propagate the exception.
new_deferred.pause()
with PreserveLoggingContext():
new_deferred.errback(Failure(CancelledError()))
new_deferred.errback(Failure(CancelledError()))
deferred.addBoth(lambda _: new_deferred.unpause())
@@ -982,6 +1003,15 @@ class AwakenableSleeper:
"""Sleep for the given number of milliseconds, or return if the given
`name` is explicitly woken up.
"""
# Create a deferred that gets called in N seconds
sleep_deferred: "defer.Deferred[None]" = defer.Deferred()
call = self._clock.call_later(
delay_ms / 1000,
sleep_deferred.callback,
None,
)
# Create a deferred that will get called if `wake` is called with
# the same `name`.
stream_set = self._streams.setdefault(name, set())
@@ -991,14 +1021,13 @@ class AwakenableSleeper:
try:
# Wait for either the delay or for `wake` to be called.
await make_deferred_yieldable(
timeout_deferred(
deferred=stop_cancellation(notify_deferred),
timeout=delay_ms / 1000,
clock=self._clock,
defer.DeferredList(
[sleep_deferred, notify_deferred],
fireOnOneCallback=True,
fireOnOneErrback=True,
consumeErrors=True,
)
)
except defer.TimeoutError:
pass
finally:
# Clean up the state
curr_stream_set = self._streams.get(name)
@@ -1007,6 +1036,10 @@ class AwakenableSleeper:
if len(curr_stream_set) == 0:
self._streams.pop(name)
# Cancel the sleep if we were woken up
if call.active():
call.cancel()
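The race between the timer and an explicit wake-up can be sketched on its own (a standalone Twisted snippet, not the Synapse helper):

from twisted.internet import defer, reactor

sleep_d: "defer.Deferred[None]" = defer.Deferred()
call = reactor.callLater(1.0, sleep_d.callback, None)  # the timeout side
wake_d: "defer.Deferred[None]" = defer.Deferred()      # fired by wake()

# Completes when either deferred fires; errors propagate from either side.
race = defer.DeferredList(
    [sleep_d, wake_d], fireOnOneCallback=True, fireOnOneErrback=True,
    consumeErrors=True,
)
# If woken early, the pending timer should be cancelled, as above:
# if call.active(): call.cancel()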
class DeferredEvent:
"""Like threading.Event but for async code"""

View File

@@ -39,7 +39,6 @@ from prometheus_client import Gauge
from twisted.internet import defer
from twisted.python.failure import Failure
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics import SERVER_NAME_LABEL
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.lrucache import LruCache
@@ -515,8 +514,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]):
cache._completed_callback(value, self, key)
if self._deferred:
with PreserveLoggingContext():
self._deferred.callback(result)
self._deferred.callback(result)
def error_bulk(
self, cache: DeferredCache[KT, VT], keys: Collection[KT], failure: Failure
@@ -526,5 +524,4 @@ class CacheMultipleEntries(CacheEntry[KT, VT]):
cache._error_callback(failure, self, key)
if self._deferred:
with PreserveLoggingContext():
self._deferred.errback(failure)
self._deferred.errback(failure)

View File

@@ -14,7 +14,6 @@
#
import logging
from typing import (
Any,
Callable,
@@ -31,14 +30,10 @@ from twisted.internet.task import LoopingCall
from synapse.logging import context
from synapse.types import ISynapseThreadlessReactor
from synapse.util import log_failure
from synapse.util.stringutils import random_string_insecure_fast
P = ParamSpec("P")
logger = logging.getLogger(__name__)
class Clock:
"""
A Clock wraps a Twisted reactor and provides utilities on top of it.
@@ -69,12 +64,7 @@ class Clock:
"""List of active looping calls"""
self._call_id_to_delayed_call: dict[int, IDelayedCall] = {}
"""
Mapping from unique call ID to delayed call.
For "performance", this only tracks a subset of delayed calls: those created
with `call_later` with `call_later_cancel_on_shutdown=True`.
"""
"""Mapping from unique call ID to delayed call"""
self._is_shutdown = False
"""Whether shutdown has been requested by the HomeServer"""
@@ -163,20 +153,11 @@ class Clock:
**kwargs: P.kwargs,
) -> LoopingCall:
"""Common functionality for `looping_call` and `looping_call_now`"""
instance_id = random_string_insecure_fast(5)
if self._is_shutdown:
raise Exception("Cannot start looping call. Clock has been shutdown")
looping_call_context_string = "looping_call"
if now:
looping_call_context_string = "looping_call_now"
def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred:
logger.debug(
"%s(%s): Executing callback", looping_call_context_string, instance_id
)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `looping_call` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
@@ -220,17 +201,6 @@ class Clock:
d = call.start(msec / 1000.0, now=now)
d.addErrback(log_failure, "Looping call died", consumeErrors=False)
self._looping_calls.append(call)
logger.debug(
"%s(%s): Scheduled looping call every %sms later",
looping_call_context_string,
instance_id,
msec,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
return call
def cancel_all_looping_calls(self, consumeErrors: bool = True) -> None:
@@ -256,7 +226,7 @@ class Clock:
*args: Any,
call_later_cancel_on_shutdown: bool = True,
**kwargs: Any,
) -> "DelayedCallWrapper":
) -> IDelayedCall:
"""Call something later
Note that the function will be called with generic `call_later` logcontext, so
@@ -275,79 +245,74 @@ class Clock:
issue, we can just track all delayed calls.
**kwargs: Key arguments to pass to function.
"""
call_id = self._delayed_call_id
self._delayed_call_id = self._delayed_call_id + 1
if self._is_shutdown:
raise Exception("Cannot start delayed call. Clock has been shutdown")
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug("call_later(%s): Executing callback", call_id)
def create_wrapped_callback(
track_for_shutdown_cancellation: bool,
) -> Callable[P, None]:
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `call_later` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
"leaked their logcontext to us."
)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `call_later` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
"leaked their logcontext to us."
)
# Because this is a callback from the reactor, we will be using the
# `sentinel` log context at this point. We want the function to log with
# some logcontext as we want to know which server the logs came from.
#
# We use `PreserveLoggingContext` to prevent our new `call_later`
# logcontext from finishing as soon as we exit this function, in case `f`
# returns an awaitable/deferred which would continue running and may try to
# restore the `call_later` context when it's done (because it's trying to
# adhere to the Synapse logcontext rules.)
#
# This also ensures that we return to the `sentinel` context when we exit
# this function and yield control back to the reactor to avoid leaking the
# current logcontext to the reactor (which would then get picked up and
# associated with the next thing the reactor does)
try:
with context.PreserveLoggingContext(
context.LoggingContext(
name="call_later", server_name=self._server_name
)
):
# We use `run_in_background` to reset the logcontext after `f` (or the
# awaitable returned by `f`) completes to avoid leaking the current
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
finally:
if track_for_shutdown_cancellation:
# We still want to remove the call from the tracking map. Even if
# the callback raises an exception.
self._call_id_to_delayed_call.pop(call_id)
# Because this is a callback from the reactor, we will be using the
# `sentinel` log context at this point. We want the function to log with
# some logcontext as we want to know which server the logs came from.
#
# We use `PreserveLoggingContext` to prevent our new `call_later`
# logcontext from finishing as soon as we exit this function, in case `f`
# returns an awaitable/deferred which would continue running and may try to
# restore the `call_later` context when it's done (because it's trying to
# adhere to the Synapse logcontext rules.)
#
# This also ensures that we return to the `sentinel` context when we exit
# this function and yield control back to the reactor to avoid leaking the
# current logcontext to the reactor (which would then get picked up and
# associated with the next thing the reactor does)
try:
with context.PreserveLoggingContext(
context.LoggingContext(
name="call_later", server_name=self._server_name
)
):
# We use `run_in_background` to reset the logcontext after `f` (or the
# awaitable returned by `f`) completes to avoid leaking the current
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
finally:
if call_later_cancel_on_shutdown:
# We still want to remove the call from the tracking map. Even if
# the callback raises an exception.
self._call_id_to_delayed_call.pop(call_id)
return wrapped_callback
# We can ignore the lint here since this class is the one location callLater should
# be called.
call = self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) # type: ignore[call-later-not-tracked]
logger.debug(
"call_later(%s): Scheduled call for %ss later (tracked for shutdown: %s)",
call_id,
delay,
call_later_cancel_on_shutdown,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
wrapped_call = DelayedCallWrapper(call, call_id, self)
if call_later_cancel_on_shutdown:
self._call_id_to_delayed_call[call_id] = wrapped_call
call_id = self._delayed_call_id
self._delayed_call_id = self._delayed_call_id + 1
return wrapped_call
# We can ignore the lint here since this class is the one location callLater
# should be called.
call = self._reactor.callLater(
delay, create_wrapped_callback(True), *args, **kwargs
) # type: ignore[call-later-not-tracked]
call = DelayedCallWrapper(call, call_id, self)
self._call_id_to_delayed_call[call_id] = call
return call
else:
# We can ignore the lint here since this class is the one location callLater should
# be called.
return self._reactor.callLater(
delay, create_wrapped_callback(False), *args, **kwargs
) # type: ignore[call-later-not-tracked]
def cancel_call_later(
self, wrapped_call: "DelayedCallWrapper", ignore_errs: bool = False
) -> None:
def cancel_call_later(self, timer: IDelayedCall, ignore_errs: bool = False) -> None:
try:
logger.debug(
"cancel_call_later: cancelling scheduled call %s", wrapped_call.call_id
)
wrapped_call.delayed_call.cancel()
timer.cancel()
except Exception:
if not ignore_errs:
raise
@@ -362,11 +327,8 @@ class Clock:
"""
# We make a copy here since calling `cancel()` on a delayed_call
# will result in the call removing itself from the map mid-iteration.
for call_id, call in list(self._call_id_to_delayed_call.items()):
for call in list(self._call_id_to_delayed_call.values()):
try:
logger.debug(
"cancel_all_delayed_calls: cancelling scheduled call %s", call_id
)
call.cancel()
except Exception:
if not ignore_errs:
@@ -390,11 +352,8 @@ class Clock:
*args: Positional arguments to pass to function.
**kwargs: Key arguments to pass to function.
"""
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug("call_when_running(%s): Executing callback", instance_id)
# Since this callback can be invoked immediately if the reactor is already
# running, we can't always assume that we're running in the sentinel
# logcontext (i.e. we can't assert that we're in the sentinel context like
@@ -433,14 +392,6 @@ class Clock:
# callWhenRunning should be called.
self._reactor.callWhenRunning(wrapped_callback, *args, **kwargs) # type: ignore[prefer-synapse-clock-call-when-running]
logger.debug(
"call_when_running(%s): Scheduled call",
instance_id,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
def add_system_event_trigger(
self,
phase: str,
@@ -466,16 +417,8 @@ class Clock:
Returns:
an ID that can be used to remove this call with `reactor.removeSystemEventTrigger`.
"""
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
logger.debug(
"add_system_event_trigger(%s): Executing %s %s callback",
instance_id,
phase,
event_type,
)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `add_system_event_trigger` callback from the reactor to start with the sentinel logcontext "
f"but saw {context.current_context()}. In other words, another task shouldn't have "
@@ -506,16 +449,6 @@ class Clock:
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
logger.debug(
"add_system_event_trigger(%s) for %s %s",
instance_id,
phase,
event_type,
# Find out who is scheduling the call which makes it easy to follow in the
# logs.
stack_info=True,
)
# We can ignore the lint here since this class is the one location
# `addSystemEventTrigger` should be called.
return self._reactor.addSystemEventTrigger(

View File

@@ -1057,32 +1057,6 @@ class MasAuthDelegation(HomeserverTestCase):
self.assertEqual(self.server.calls, 1)
class MasAuthDelegationWithSubpath(MasAuthDelegation):
"""Test MAS delegation when the MAS server is hosted on a subpath."""
def default_config(self) -> dict[str, Any]:
config = super().default_config()
# Override the endpoint to include a subpath
config["matrix_authentication_service"]["endpoint"] = (
self.server.endpoint + "auth/path/"
)
return config
def test_introspection_endpoint_uses_subpath(self) -> None:
"""Test that the introspection endpoint correctly uses the configured subpath."""
expected_introspection_url = (
self.server.endpoint + "auth/path/oauth2/introspect"
)
self.assertEqual(self._auth._introspection_endpoint, expected_introspection_url)
def test_metadata_url_uses_subpath(self) -> None:
"""Test that the metadata URL correctly uses the configured subpath."""
expected_metadata_url = (
self.server.endpoint + "auth/path/.well-known/openid-configuration"
)
self.assertEqual(self._auth._metadata_url, expected_metadata_url)
@parameterized_class(
("config",),
[

File diff suppressed because it is too large

View File

@@ -28,7 +28,6 @@ from synapse.types import JsonDict
from synapse.util.clock import Clock
from tests import unittest
from tests.server import FakeChannel
from tests.unittest import HomeserverTestCase
PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events"
@@ -128,10 +127,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
def test_get_delayed_events_auth(self) -> None:
channel = self.make_request("GET", PATH_PREFIX)
self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, channel.result)
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
@@ -159,6 +154,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/",
access_token=self.user1_access_token,
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
@@ -166,6 +162,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/abc",
access_token=self.user1_access_token,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -178,6 +175,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
"POST",
f"{PATH_PREFIX}/abc",
{},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -190,6 +188,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
"POST",
f"{PATH_PREFIX}/abc",
{"action": "oops"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result)
self.assertEqual(
@@ -197,21 +196,17 @@ class DelayedEventsTestCase(HomeserverTestCase):
channel.json_body["errcode"],
)
@parameterized.expand(
(
(action, action_in_path)
for action in ("cancel", "restart", "send")
for action_in_path in (True, False)
@parameterized.expand(["cancel", "restart", "send"])
def test_update_delayed_event_without_match(self, action: str) -> None:
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/abc",
{"action": action},
self.user1_access_token,
)
)
def test_update_delayed_event_without_match(
self, action: str, action_in_path: bool
) -> None:
channel = self._update_delayed_event("abc", action, action_in_path)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result)
@parameterized.expand((True, False))
def test_cancel_delayed_state_event(self, action_in_path: bool) -> None:
def test_cancel_delayed_state_event(self) -> None:
state_key = "to_never_send"
setter_key = "setter"
@@ -226,7 +221,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -241,7 +236,12 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "cancel", action_in_path)
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_id}",
{"action": "cancel"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertListEqual([], self._get_delayed_events())
@@ -254,11 +254,10 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
def test_cancel_delayed_event_ratelimit(self, action_in_path: bool) -> None:
def test_cancel_delayed_event_ratelimit(self) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -269,17 +268,38 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
delay_ids.append(delay_id)
channel = self._update_delayed_event(delay_ids.pop(0), "cancel", action_in_path)
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "cancel"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(delay_ids.pop(0), "cancel", action_in_path)
args = (
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "cancel"},
self.user1_access_token,
)
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
@parameterized.expand((True, False))
def test_send_delayed_state_event(self, action_in_path: bool) -> None:
# Add the current user to the ratelimit overrides, exempting them from ratelimiting.
self.get_success(
self.hs.get_datastores().main.set_ratelimit_for_user(
self.user1_user_id, 0, 0
)
)
# Test that the request isn't ratelimited anymore.
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
def test_send_delayed_state_event(self) -> None:
state_key = "to_send_on_request"
setter_key = "setter"
@@ -294,7 +314,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -309,7 +329,12 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "send", action_in_path)
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_id}",
{"action": "send"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertListEqual([], self._get_delayed_events())
content = self.helper.get_state(
@@ -320,9 +345,8 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
@parameterized.expand((True, False))
@unittest.override_config({"rc_message": {"per_second": 2.5, "burst_count": 3}})
def test_send_delayed_event_ratelimit(self, action_in_path: bool) -> None:
@unittest.override_config({"rc_message": {"per_second": 3.5, "burst_count": 4}})
def test_send_delayed_event_ratelimit(self) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -333,17 +357,38 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
delay_ids.append(delay_id)
channel = self._update_delayed_event(delay_ids.pop(0), "send", action_in_path)
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "send"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(delay_ids.pop(0), "send", action_in_path)
args = (
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "send"},
self.user1_access_token,
)
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
@parameterized.expand((True, False))
def test_restart_delayed_state_event(self, action_in_path: bool) -> None:
# Add the current user to the ratelimit overrides, exempting them from ratelimiting.
self.get_success(
self.hs.get_datastores().main.set_ratelimit_for_user(
self.user1_user_id, 0, 0
)
)
# Test that the request isn't ratelimited anymore.
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
def test_restart_delayed_state_event(self) -> None:
state_key = "to_send_on_restarted_timeout"
setter_key = "setter"
@@ -358,7 +403,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
self.reactor.advance(1)
events = self._get_delayed_events()
@@ -373,7 +418,12 @@ class DelayedEventsTestCase(HomeserverTestCase):
expect_code=HTTPStatus.NOT_FOUND,
)
channel = self._update_delayed_event(delay_id, "restart", action_in_path)
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_id}",
{"action": "restart"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.reactor.advance(1)
@@ -399,11 +449,10 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(setter_expected, content.get(setter_key), content)
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
)
def test_restart_delayed_event_ratelimit(self, action_in_path: bool) -> None:
def test_restart_delayed_event_ratelimit(self) -> None:
delay_ids = []
for _ in range(2):
channel = self.make_request(
@@ -414,19 +463,37 @@ class DelayedEventsTestCase(HomeserverTestCase):
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
delay_id = channel.json_body.get("delay_id")
assert delay_id is not None
self.assertIsNotNone(delay_id)
delay_ids.append(delay_id)
channel = self._update_delayed_event(
delay_ids.pop(0), "restart", action_in_path
channel = self.make_request(
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "restart"},
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
channel = self._update_delayed_event(
delay_ids.pop(0), "restart", action_in_path
args = (
"POST",
f"{PATH_PREFIX}/{delay_ids.pop(0)}",
{"action": "restart"},
self.user1_access_token,
)
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result)
# Add the current user to the ratelimit overrides, exempting them from ratelimiting.
self.get_success(
self.hs.get_datastores().main.set_ratelimit_for_user(
self.user1_user_id, 0, 0
)
)
# Test that the request isn't ratelimited anymore.
channel = self.make_request(*args)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
def test_delayed_state_is_not_cancelled_by_new_state_from_same_user(
self,
) -> None:
@@ -531,17 +598,6 @@ class DelayedEventsTestCase(HomeserverTestCase):
return content
def _update_delayed_event(
self, delay_id: str, action: str, action_in_path: bool
) -> FakeChannel:
path = f"{PATH_PREFIX}/{delay_id}"
body = {}
if action_in_path:
path += f"/{action}"
else:
body["action"] = action
return self.make_request("POST", path, body)
def _get_path_for_delayed_state(
room_id: str, event_type: str, state_key: str, delay_ms: int

View File

@@ -0,0 +1,957 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import RelationTypes
from synapse.rest.client import login, relations, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util.clock import Clock
from tests import unittest
logger = logging.getLogger(__name__)
class ThreadUpdatesTestCase(unittest.HomeserverTestCase):
"""
Test the /thread_updates companion endpoint (MSC4360).
"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
relations.register_servlets,
]
def default_config(self) -> JsonDict:
config = super().default_config()
config["experimental_features"] = {"msc4360_enabled": True}
return config
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_no_updates_for_new_user(self) -> None:
"""
Test that a user with no thread updates gets an empty response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Request thread updates
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
content={"include_roots": True},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Assert empty chunk and no next_batch
self.assertEqual(channel.json_body["chunk"], {})
self.assertNotIn("next_batch", channel.json_body)
def test_single_thread_update(self) -> None:
"""
Test that a single thread with one reply appears in the response.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create thread root
thread_root_resp = self.helper.send(room_id, body="Thread root", tok=user1_tok)
thread_root_id = thread_root_resp["event_id"]
# Add reply to thread
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply 1",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root_id,
},
},
tok=user1_tok,
)
# Request thread updates
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
# Assert thread is present
chunk = channel.json_body["chunk"]
self.assertIn(room_id, chunk)
self.assertIn(thread_root_id, chunk[room_id])
# Assert thread root is included
thread_update = chunk[room_id][thread_root_id]
self.assertIn("thread_root", thread_update)
self.assertEqual(thread_update["thread_root"]["event_id"], thread_root_id)
# Assert prev_batch is NOT present (only 1 update - the reply)
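# (prev_batch is only returned when the thread has more updates than
# were included in this response)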
self.assertNotIn("prev_batch", thread_update)
def test_multiple_threads_single_room(self) -> None:
"""
Test that multiple threads in the same room are grouped correctly.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create two threads
thread1_root_id = self.helper.send(room_id, body="Thread 1", tok=user1_tok)[
"event_id"
]
thread2_root_id = self.helper.send(room_id, body="Thread 2", tok=user1_tok)[
"event_id"
]
# Add replies to both threads
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 1",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread1_root_id,
},
},
tok=user1_tok,
)
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 2",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread2_root_id,
},
},
tok=user1_tok,
)
# Request thread updates
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
# Assert both threads are in the same room
chunk = channel.json_body["chunk"]
self.assertIn(room_id, chunk)
self.assertEqual(len(chunk), 1, "Should only have one room")
self.assertEqual(len(chunk[room_id]), 2, "Should have two threads")
self.assertIn(thread1_root_id, chunk[room_id])
self.assertIn(thread2_root_id, chunk[room_id])
def test_threads_across_multiple_rooms(self) -> None:
"""
Test that threads from different rooms are grouped by room_id.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_a_id = self.helper.create_room_as(user1_id, tok=user1_tok)
room_b_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create threads in both rooms
thread_a_root_id = self.helper.send(room_a_id, body="Thread A", tok=user1_tok)[
"event_id"
]
thread_b_root_id = self.helper.send(room_b_id, body="Thread B", tok=user1_tok)[
"event_id"
]
# Add replies
self.helper.send_event(
room_a_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to A",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_a_root_id,
},
},
tok=user1_tok,
)
self.helper.send_event(
room_b_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to B",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_b_root_id,
},
},
tok=user1_tok,
)
# Request thread updates
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
# Assert both rooms are present with their threads
chunk = channel.json_body["chunk"]
self.assertEqual(len(chunk), 2, "Should have two rooms")
self.assertIn(room_a_id, chunk)
self.assertIn(room_b_id, chunk)
self.assertIn(thread_a_root_id, chunk[room_a_id])
self.assertIn(thread_b_root_id, chunk[room_b_id])
def test_pagination_with_from_token(self) -> None:
"""
Test that pagination works using the next_batch token.
This verifies that multiple calls to /thread_updates return all thread
updates with no duplicates and no gaps.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create several threads (more than the per-page limit requested below)
thread_ids = []
for i in range(5):
thread_root_id = self.helper.send(
room_id, body=f"Thread {i}", tok=user1_tok
)["event_id"]
thread_ids.append(thread_root_id)
# Add reply
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": f"Reply to thread {i}",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root_id,
},
},
tok=user1_tok,
)
# Request first page with small limit
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=2",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
# Should have 2 threads and a next_batch token
first_page_threads = set(channel.json_body["chunk"][room_id].keys())
self.assertEqual(len(first_page_threads), 2)
self.assertIn("next_batch", channel.json_body)
next_batch = channel.json_body["next_batch"]
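# next_batch points just past the oldest update returned, so the next
# page resumes where this one left off without overlap.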
# Request second page
channel = self.make_request(
"POST",
f"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=2&from={next_batch}",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
second_page_threads = set(channel.json_body["chunk"][room_id].keys())
self.assertEqual(len(second_page_threads), 2)
# Verify no overlap
self.assertEqual(
len(first_page_threads & second_page_threads),
0,
"Pages should not have overlapping threads",
)
# Request third page to get the remaining thread
self.assertIn("next_batch", channel.json_body)
next_batch_2 = channel.json_body["next_batch"]
channel = self.make_request(
"POST",
f"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=2&from={next_batch_2}",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200, channel.json_body)
third_page_threads = set(channel.json_body["chunk"][room_id].keys())
self.assertEqual(len(third_page_threads), 1)
# Verify no overlap between any pages
self.assertEqual(len(first_page_threads & third_page_threads), 0)
self.assertEqual(len(second_page_threads & third_page_threads), 0)
# Verify no gaps - all threads should be accounted for across all pages
all_threads = set(thread_ids)
combined_threads = first_page_threads | second_page_threads | third_page_threads
self.assertEqual(
combined_threads,
all_threads,
"Combined pages should include all thread updates with no gaps",
)
def test_invalid_dir_parameter(self) -> None:
"""
Test that forward pagination (dir=f) is rejected with an error.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Request with forward direction should fail
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=f",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 400)
def test_invalid_limit_parameter(self) -> None:
"""
Test that invalid limit values are rejected.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Zero limit should fail
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=0",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 400)
# Negative limit should fail
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=-5",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 400)
def test_invalid_pagination_tokens(self) -> None:
"""
Test that invalid from/to tokens are rejected with appropriate errors.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Invalid from token
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&from=invalid_token",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 400)
# Invalid to token
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&to=invalid_token",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 400)
def test_to_token_filtering(self) -> None:
"""
Test that the to_token parameter correctly limits pagination to updates
newer than the to_token (since we paginate backwards from newest to oldest).
This also verifies the to_token boundary is exclusive - updates at exactly
the to_token position should not be included (as they were already returned
in a previous response that synced up to that position).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create two thread roots
thread1_root_id = self.helper.send(room_id, body="Thread 1", tok=user1_tok)[
"event_id"
]
thread2_root_id = self.helper.send(room_id, body="Thread 2", tok=user1_tok)[
"event_id"
]
# Send replies to both threads
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 1",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread1_root_id,
},
},
tok=user1_tok,
)
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 2",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread2_root_id,
},
},
tok=user1_tok,
)
# Request with limit=1 to get only the latest thread update
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=1",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200)
self.assertIn("next_batch", channel.json_body)
# next_batch points to the position just before (i.e. older than) the update we just received
next_batch = channel.json_body["next_batch"]
first_response_threads = set(channel.json_body["chunk"][room_id].keys())
# Request again with to=next_batch (the lower bound for backward pagination)
# and no limit. This should return only the same thread update as before,
# not the older update beyond the to_token boundary.
channel = self.make_request(
"POST",
f"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&to={next_batch}",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200)
chunk = channel.json_body["chunk"]
self.assertIn(room_id, chunk)
# Should have exactly one thread update
self.assertEqual(len(chunk[room_id]), 1)
second_response_threads = set(chunk[room_id].keys())
# Verify both responses are identical - the to parameter boundary is exclusive
self.assertEqual(
first_response_threads,
second_response_threads,
"to parameter boundary should be exclusive - both responses should be identical",
)
def test_bundled_aggregations_on_thread_roots(self) -> None:
"""
Test that thread root events include bundled aggregations with latest thread event.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create thread root
thread_root_id = self.helper.send(room_id, body="Thread root", tok=user1_tok)[
"event_id"
]
# Send replies to create bundled aggregation data
for i in range(2):
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": f"Reply {i + 1}",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root_id,
},
},
tok=user1_tok,
)
# Request thread updates
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200)
# Check that thread root has bundled aggregations with latest event
chunk = channel.json_body["chunk"]
thread_update = chunk[room_id][thread_root_id]
thread_root_event = thread_update["thread_root"]
# Should have unsigned data with latest thread event content
self.assertIn("unsigned", thread_root_event)
self.assertIn("m.relations", thread_root_event["unsigned"])
relations = thread_root_event["unsigned"]["m.relations"]
self.assertIn(RelationTypes.THREAD, relations)
# Check latest event is present in bundled aggregations
thread_summary = relations[RelationTypes.THREAD]
self.assertIn("latest_event", thread_summary)
latest_event = thread_summary["latest_event"]
self.assertEqual(latest_event["content"]["body"], "Reply 2")
def test_only_joined_rooms(self) -> None:
"""
Test that thread updates only include rooms where the user is currently joined.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create two rooms, user1 joins both
room1_id = self.helper.create_room_as(user1_id, tok=user1_tok)
room2_id = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room2_id, user1_id, tok=user1_tok)
# Create threads in both rooms
thread1_root_id = self.helper.send(room1_id, body="Thread 1", tok=user1_tok)[
"event_id"
]
thread2_root_id = self.helper.send(room2_id, body="Thread 2", tok=user2_tok)[
"event_id"
]
# Add replies to both threads
self.helper.send_event(
room1_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 1",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread1_root_id,
},
},
tok=user1_tok,
)
self.helper.send_event(
room2_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply to thread 2",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread2_root_id,
},
},
tok=user2_tok,
)
# User1 leaves room2
self.helper.leave(room2_id, user1_id, tok=user1_tok)
# Request thread updates for user1 - should only get room1
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={"include_roots": True},
)
self.assertEqual(channel.code, 200)
chunk = channel.json_body["chunk"]
# Should only have room1, not room2
self.assertIn(room1_id, chunk)
self.assertNotIn(room2_id, chunk)
self.assertIn(thread1_root_id, chunk[room1_id])
def test_room_filtering_with_lists(self) -> None:
"""
Test that room filtering works correctly using the lists parameter.
This verifies that thread updates are only returned for rooms matching
the provided filters.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create an encrypted room and an unencrypted room
encrypted_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"initial_state": [
{
"type": "m.room.encryption",
"state_key": "",
"content": {"algorithm": "m.megolm.v1.aes-sha2"},
}
]
},
)
unencrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create threads in both rooms
enc_thread_root_id = self.helper.send(
encrypted_room_id, body="Encrypted thread", tok=user1_tok
)["event_id"]
unenc_thread_root_id = self.helper.send(
unencrypted_room_id, body="Unencrypted thread", tok=user1_tok
)["event_id"]
# Add replies to both threads
self.helper.send_event(
encrypted_room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply in encrypted room",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": enc_thread_root_id,
},
},
tok=user1_tok,
)
self.helper.send_event(
unencrypted_room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply in unencrypted room",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": unenc_thread_root_id,
},
},
tok=user1_tok,
)
# Request thread updates with filter for encrypted rooms only
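# (lists follow sliding-sync semantics: "ranges" select rooms from the
# sorted list and "filters" narrow which rooms are eligible)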
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={
"lists": {
"encrypted_list": {
"ranges": [[0, 99]],
"required_state": [["m.room.encryption", ""]],
"timeline_limit": 10,
"filters": {"is_encrypted": True},
}
}
},
)
self.assertEqual(channel.code, 200, channel.json_body)
chunk = channel.json_body["chunk"]
# Should only include the encrypted room
self.assertIn(encrypted_room_id, chunk)
self.assertNotIn(unencrypted_room_id, chunk)
self.assertIn(enc_thread_root_id, chunk[encrypted_room_id])
def test_room_filtering_with_room_subscriptions(self) -> None:
"""
Test that room filtering works correctly using the room_subscriptions parameter.
This verifies that thread updates are only returned for explicitly subscribed rooms.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create three rooms
room1_id = self.helper.create_room_as(user1_id, tok=user1_tok)
room2_id = self.helper.create_room_as(user1_id, tok=user1_tok)
room3_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create threads in all three rooms
thread1_root_id = self.helper.send(room1_id, body="Thread 1", tok=user1_tok)[
"event_id"
]
thread2_root_id = self.helper.send(room2_id, body="Thread 2", tok=user1_tok)[
"event_id"
]
thread3_root_id = self.helper.send(room3_id, body="Thread 3", tok=user1_tok)[
"event_id"
]
# Add replies to all threads
for room_id, thread_root_id in [
(room1_id, thread1_root_id),
(room2_id, thread2_root_id),
(room3_id, thread3_root_id),
]:
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root_id,
},
},
tok=user1_tok,
)
# Request thread updates with subscription to only room1 and room2
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={
"room_subscriptions": {
room1_id: {
"required_state": [["m.room.name", ""]],
"timeline_limit": 10,
},
room2_id: {
"required_state": [["m.room.name", ""]],
"timeline_limit": 10,
},
}
},
)
self.assertEqual(channel.code, 200, channel.json_body)
chunk = channel.json_body["chunk"]
# Should only include room1 and room2, not room3
self.assertIn(room1_id, chunk)
self.assertIn(room2_id, chunk)
self.assertNotIn(room3_id, chunk)
self.assertIn(thread1_root_id, chunk[room1_id])
self.assertIn(thread2_root_id, chunk[room2_id])
def test_room_filtering_with_lists_and_room_subscriptions(self) -> None:
"""
Test that room filtering works correctly when both lists and room_subscriptions
are provided. The union of rooms from both should be included.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create an encrypted room and two unencrypted rooms
encrypted_room_id = self.helper.create_room_as(
user1_id,
tok=user1_tok,
extra_content={
"initial_state": [
{
"type": "m.room.encryption",
"state_key": "",
"content": {"algorithm": "m.megolm.v1.aes-sha2"},
}
]
},
)
unencrypted_room1_id = self.helper.create_room_as(user1_id, tok=user1_tok)
unencrypted_room2_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# Create threads in all three rooms
enc_thread_root_id = self.helper.send(
encrypted_room_id, body="Encrypted thread", tok=user1_tok
)["event_id"]
unenc1_thread_root_id = self.helper.send(
unencrypted_room1_id, body="Unencrypted thread 1", tok=user1_tok
)["event_id"]
unenc2_thread_root_id = self.helper.send(
unencrypted_room2_id, body="Unencrypted thread 2", tok=user1_tok
)["event_id"]
# Add replies to all threads
for room_id, thread_root_id in [
(encrypted_room_id, enc_thread_root_id),
(unencrypted_room1_id, unenc1_thread_root_id),
(unencrypted_room2_id, unenc2_thread_root_id),
]:
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root_id,
},
},
tok=user1_tok,
)
# Request thread updates with:
# - lists: filter for encrypted rooms
# - room_subscriptions: explicitly subscribe to unencrypted_room1_id
# Expected: should get both encrypted_room_id (from list) and unencrypted_room1_id
# (from subscription), but NOT unencrypted_room2_id
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b",
access_token=user1_tok,
content={
"lists": {
"encrypted_list": {
"ranges": [[0, 99]],
"required_state": [["m.room.encryption", ""]],
"timeline_limit": 10,
"filters": {"is_encrypted": True},
}
},
"room_subscriptions": {
unencrypted_room1_id: {
"required_state": [["m.room.name", ""]],
"timeline_limit": 10,
}
},
},
)
self.assertEqual(channel.code, 200, channel.json_body)
chunk = channel.json_body["chunk"]
# Should include encrypted_room_id (from list filter) and unencrypted_room1_id
# (from subscription), but not unencrypted_room2_id
self.assertIn(encrypted_room_id, chunk)
self.assertIn(unencrypted_room1_id, chunk)
self.assertNotIn(unencrypted_room2_id, chunk)
self.assertIn(enc_thread_root_id, chunk[encrypted_room_id])
self.assertIn(unenc1_thread_root_id, chunk[unencrypted_room1_id])
def test_threads_not_returned_after_leaving_room(self) -> None:
"""
Test that thread updates are properly bounded when a user leaves a room.
Users should see thread updates that occurred up to the point they left,
but NOT updates that occurred after they left.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create room and both users join
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.join(room_id, user2_id, tok=user2_tok)
# Create thread
res = self.helper.send(room_id, body="Thread root", tok=user1_tok)
thread_root = res["event_id"]
# Reply in thread while user2 is joined
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply 1 while user2 joined",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root,
},
},
tok=user1_tok,
)
# User2 leaves the room
self.helper.leave(room_id, user2_id, tok=user2_tok)
# Another reply after user2 left
self.helper.send_event(
room_id,
type="m.room.message",
content={
"msgtype": "m.text",
"body": "Reply 2 after user2 left",
"m.relates_to": {
"rel_type": RelationTypes.THREAD,
"event_id": thread_root,
},
},
tok=user1_tok,
)
# User2 gets thread updates with an explicit room subscription
# (We need to explicitly subscribe to the room to include it after leaving;
# otherwise only joined rooms are returned)
channel = self.make_request(
"POST",
"/_matrix/client/unstable/io.element.msc4360/thread_updates?dir=b&limit=100",
{
"room_subscriptions": {
room_id: {
"required_state": [],
"timeline_limit": 0,
}
}
},
access_token=user2_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Assert: User2 SHOULD see Reply 1 (happened while joined) but NOT Reply 2 (after leaving)
chunk = channel.json_body["chunk"]
self.assertIn(
room_id,
chunk,
"Thread updates should include the room user2 left",
)
self.assertIn(
thread_root,
chunk[room_id],
"Thread root should be in the updates",
)
# Verify that only a single update was seen (Reply 1) by checking that there's
# no prev_batch token. If Reply 2 was also included, there would be multiple
# updates and a prev_batch token would be present.
thread_update = chunk[room_id][thread_root]
self.assertNotIn(
"prev_batch",
thread_update,
"No prev_batch should be present since only one update (Reply 1) is visible",
)