Compare commits


4 Commits

Author         SHA1        Message                                        Date
Eric Eastwood  7e1a2f5a96  Better label lowercase to avoid confusion     2024-11-01 11:40:04 -05:00
Eric Eastwood  150ce230cb  Harden test                                   2024-11-01 11:35:55 -05:00
Eric Eastwood  842304df57  Add changelog                                 2024-11-01 11:30:34 -05:00
Eric Eastwood  d29cb8abed  Remove usage on internal header encoding API  2024-11-01 11:24:22 -05:00
171 changed files with 1288 additions and 5020 deletions


@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.9 right now)
# is Python 3.8 right now)
trial_sqlite_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "all",
}
@@ -53,12 +53,12 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.10", "3.11", "3.12", "3.13")
for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
)
trial_postgres_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -77,7 +77,7 @@ if not IS_PR:
trial_no_extra_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "",
}
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
#
# For each type of test we only run on bullseye on PRs
# For each type of test we only run on focal on PRs
sytest_tests = [
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"reactor": "asyncio",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
"reactor": "asyncio",
},
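The script above assembles each CI job as a plain dict and hands the combined matrix to GitHub Actions as JSON. A minimal sketch of that pattern, assuming a `set_output` helper that appends to `$GITHUB_OUTPUT` (the real script's helper may differ; version numbers here follow the pre-revert side of the diff):

```python
import json
import os

# Mirror the script's PR detection: GITHUB_REF looks like "refs/pull/123/merge".
IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")


def set_output(key: str, value: str) -> None:
    # GitHub Actions reads step outputs from the file named by $GITHUB_OUTPUT.
    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
        fh.write(f"{key}={value}\n")


# On PRs, test only the oldest supported Python; otherwise fan out to all.
trial_sqlite_tests = [
    {"python-version": "3.9", "database": "sqlite", "extras": "all"}
]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.10", "3.11", "3.12", "3.13")
    )

# The workflow consumes the matrix with fromJson(), so emit a JSON string.
set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```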


@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `jammy` container; it
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally


@@ -132,9 +132,9 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: bullseye
- sytest-tag: focal
- sytest-tag: bullseye
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis


@@ -91,19 +91,10 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Artifact name
id: artifact-name
# We can't have colons in the upload name of the artifact, so we convert
# e.g. `debian:sid` to `sid`.
env:
DISTRO: ${{ matrix.distro }}
run: |
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
name: debs
path: debs/*
build-wheels:
@@ -111,7 +102,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, macos-13]
os: [ubuntu-20.04, macos-12]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -121,9 +112,9 @@ jobs:
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-13"
os: "macos-12"
# Don't build aarch64 wheels on mac.
- os: "macos-13"
- os: "macos-12"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
@@ -153,7 +144,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -165,9 +156,9 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
@@ -186,7 +177,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Sdist
path: dist/*.tar.gz
@@ -203,21 +194,19 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
run: |
mkdir debs
mv debs*/* debs/
tar -cvJf debs.tar.xz debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
# Pinned to work around https://github.com/softprops/action-gh-release/issues/445
uses: softprops/action-gh-release@v0.1.15
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel*/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}
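One detail worth noting from the removed `Artifact name` step: the shell expansion `${DISTRO#*:}` drops everything up to and including the first colon, so `debian:sid` becomes a valid artifact name. A hypothetical Python equivalent, for illustration only (not part of the workflow):

```python
def artifact_name(distro: str) -> str:
    # `${DISTRO#*:}` strips through the first colon: "debian:sid" -> "sid".
    # A string without a colon passes through unchanged, as in the shell.
    _, _, suffix = distro.partition(":")
    return suffix or distro


assert artifact_name("debian:sid") == "sid"
assert artifact_name("ubuntu") == "ubuntu"
```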


@@ -397,7 +397,7 @@ jobs:
needs:
- linting-done
- changes
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
@@ -409,12 +409,12 @@ jobs:
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python3-dev \
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v5
with:
python-version: '3.9'
python-version: '3.8'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.9"]
python-version: ["pypy-3.8"]
extras: ["all"]
steps:
@@ -580,11 +580,11 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.9"
- python-version: "3.8"
postgres-version: "11"
- python-version: "3.13"
postgres-version: "17"
- python-version: "3.11"
postgres-version: "15"
services:
postgres:


@@ -99,11 +99,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:bullseye
image: matrixdotorg/sytest-synapse:focal
volumes:
- ${{ github.workspace }}:/src


@@ -1,221 +1,3 @@
# Synapse 1.121.0rc1 (2024-12-04)
This release candidate contains the security fixes from [v1.120.2](https://github.com/element-hq/synapse/releases/tag/v1.120.2).
New changes listed below.
### Features
- Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. ([\#17705](https://github.com/element-hq/synapse/issues/17705))
- Update [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync to include invite, ban, kick, targets when `$LAZY`-loading room members. ([\#17947](https://github.com/element-hq/synapse/issues/17947))
- Use stable `M_USER_LOCKED` error code for locked accounts, as per [Matrix 1.12](https://spec.matrix.org/v1.12/client-server-api/#account-locking). ([\#17965](https://github.com/element-hq/synapse/issues/17965))
- [MSC4076](https://github.com/matrix-org/matrix-spec-proposals/pull/4076): Add `disable_badge_count` to pusher configuration. ([\#17975](https://github.com/element-hq/synapse/issues/17975))
### Bugfixes
- Fix long-standing bug where read receipts could get overly delayed being sent over federation. ([\#17933](https://github.com/element-hq/synapse/issues/17933))
### Improved Documentation
- Add OIDC example configuration for Forgejo (fork of Gitea). ([\#17872](https://github.com/element-hq/synapse/issues/17872))
- Link to element-docker-demo from contrib/docker*. ([\#17953](https://github.com/element-hq/synapse/issues/17953))
### Internal Changes
- [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies. ([\#17253](https://github.com/element-hq/synapse/issues/17253))
- Fix incorrect comment in new schema delta. ([\#17936](https://github.com/element-hq/synapse/issues/17936))
- Raise setuptools_rust version cap to 1.10.2. ([\#17944](https://github.com/element-hq/synapse/issues/17944))
- Enable encrypted appservice related experimental features in the complement docker image. ([\#17945](https://github.com/element-hq/synapse/issues/17945))
- Return whether the user is suspended when querying the user account in the Admin API. ([\#17952](https://github.com/element-hq/synapse/issues/17952))
- Fix new scheduled tasks jumping the queue. ([\#17962](https://github.com/element-hq/synapse/issues/17962))
- Bump pyo3 and dependencies to v0.23.2. ([\#17966](https://github.com/element-hq/synapse/issues/17966))
- Update setuptools-rust and fix building abi3 wheels in latest version. ([\#17969](https://github.com/element-hq/synapse/issues/17969))
- Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})`. ([\#17972](https://github.com/element-hq/synapse/issues/17972))
- Fix Docker and Complement config to be able to use `public_baseurl`. ([\#17986](https://github.com/element-hq/synapse/issues/17986))
- Fix building wheels for MacOS which was temporarily disabled in Synapse 1.120.2. ([\#17993](https://github.com/element-hq/synapse/issues/17993))
- Fix release process to not create duplicate releases. ([\#17970](https://github.com/element-hq/synapse/issues/17970), [\#17995](https://github.com/element-hq/synapse/issues/17995))
### Updates to locked dependencies
* Bump bytes from 1.8.0 to 1.9.0. ([\#17982](https://github.com/element-hq/synapse/issues/17982))
* Bump pysaml2 from 7.3.1 to 7.5.0. ([\#17978](https://github.com/element-hq/synapse/issues/17978))
* Bump serde_json from 1.0.132 to 1.0.133. ([\#17939](https://github.com/element-hq/synapse/issues/17939))
* Bump tomli from 2.0.2 to 2.1.0. ([\#17959](https://github.com/element-hq/synapse/issues/17959))
* Bump tomli from 2.1.0 to 2.2.1. ([\#17979](https://github.com/element-hq/synapse/issues/17979))
* Bump tornado from 6.4.1 to 6.4.2. ([\#17955](https://github.com/element-hq/synapse/issues/17955))
# Synapse 1.120.2 (2024-12-03)
This version has building of wheels for macOS disabled.
It is functionally identical to 1.120.1, which contains multiple security fixes.
If you are already using 1.120.1, there is no need to upgrade to this version.
# Synapse 1.120.1 (2024-12-03)
This patch release fixes multiple security vulnerabilities, some affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild.
Administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below.
### Security advisory
The following issues are fixed in 1.120.1.
- [GHSA-rfq8-j7rh-8hf2](https://github.com/element-hq/synapse/security/advisories/GHSA-rfq8-j7rh-8hf2) / [CVE-2024-52805](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52805): **Unsupported content types can lead to memory exhaustion**
Synapse instances which have a high `max_upload_size` and which don't have a reverse proxy in front of them that would otherwise limit upload size are affected.
Fixed by [4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf](https://github.com/element-hq/synapse/commit/4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf).
- [GHSA-f3r3-h2mq-hx2h](https://github.com/element-hq/synapse/security/advisories/GHSA-f3r3-h2mq-hx2h) / [CVE-2024-52815](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52815): **Malicious invites via federation can break a user's sync**
Fixed by [d82e1ed357b7ee21dff83d06cba7a67840cfd464](https://github.com/element-hq/synapse/commit/d82e1ed357b7ee21dff83d06cba7a67840cfd464).
- [GHSA-vp6v-whfm-rv3g](https://github.com/element-hq/synapse/security/advisories/GHSA-vp6v-whfm-rv3g) / [CVE-2024-53863](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53863): **Synapse can be forced to thumbnail unexpected file formats, invoking potentially untrustworthy decoders**
Synapse instances can disable dynamic thumbnailing by setting `dynamic_thumbnails` to `false` in the configuration file.
Fixed by [b64a4e5fbbbf119b6c65aedf0d999b4237d55503](https://github.com/element-hq/synapse/commit/b64a4e5fbbbf119b6c65aedf0d999b4237d55503).
- [GHSA-56w4-5538-8v8h](https://github.com/element-hq/synapse/security/advisories/GHSA-56w4-5538-8v8h) / [CVE-2024-53867](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53867): **The Sliding Sync feature on Synapse versions between 1.113.0rc1 and 1.120.0 can leak partial room state changes to users no longer in a room**
Non-state events, like messages, are unaffected.
Synapse instances can disable the Sliding Sync feature by setting `experimental_features.msc3575_enabled` to `false` in the configuration file.
Fixed by [4daa533e82f345ce87b9495d31781af570ba3ead](https://github.com/element-hq/synapse/commit/4daa533e82f345ce87b9495d31781af570ba3ead).
See the advisories for more details. If you have any questions, email [security at element.io](mailto:security@element.io).
### Bugfixes
- Fix release process to not create duplicate releases. ([\#17970](https://github.com/element-hq/synapse/issues/17970))
# Synapse 1.120.0 (2024-11-26)
### Bugfixes
- Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments. ([\#17960](https://github.com/element-hq/synapse/issues/17960))
# Synapse 1.120.0rc1 (2024-11-20)
This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the
homeserver's media store.
Most homeservers operating in the public federation will not be impacted by this change, given that
the large homeserver `matrix.org` enabled this in September 2024 and therefore most clients and servers
will already have updated as a result.
Some server administrators may still wish to disable this enforcement for the time being, in the interest of compatibility with older clients
and older federated homeservers.
See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html#authenticated-media-is-now-enforced-by-default) for more information.
### Features
- Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. ([\#17889](https://github.com/element-hq/synapse/issues/17889))
- Add a one-off task to delete old One-Time Keys, to guard against us having old OTKs in the database that the client has long forgotten about. ([\#17934](https://github.com/element-hq/synapse/issues/17934))
### Improved Documentation
- Clarify the semantics of the `enable_authenticated_media` configuration option. ([\#17913](https://github.com/element-hq/synapse/issues/17913))
- Add documentation about backing up Synapse. ([\#17931](https://github.com/element-hq/synapse/issues/17931))
### Deprecations and Removals
- Remove support for [MSC3886: Simple client rendezvous capability](https://github.com/matrix-org/matrix-spec-proposals/pull/3886), which has been superseded by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) and therefore closed. ([\#17638](https://github.com/element-hq/synapse/issues/17638))
### Internal Changes
- Addressed some typos in docs and returned error message for unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865))
- Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923))
- Bump macOS version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924))
- Move server event filtering logic to Rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928))
- Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932))
- Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946))
### Updates to locked dependencies
* Bump anyhow from 1.0.92 to 1.0.93. ([\#17920](https://github.com/element-hq/synapse/issues/17920))
* Bump bleach from 6.1.0 to 6.2.0. ([\#17918](https://github.com/element-hq/synapse/issues/17918))
* Bump immutabledict from 4.2.0 to 4.2.1. ([\#17941](https://github.com/element-hq/synapse/issues/17941))
* Bump packaging from 24.1 to 24.2. ([\#17940](https://github.com/element-hq/synapse/issues/17940))
* Bump phonenumbers from 8.13.49 to 8.13.50. ([\#17942](https://github.com/element-hq/synapse/issues/17942))
* Bump pygithub from 2.4.0 to 2.5.0. ([\#17917](https://github.com/element-hq/synapse/issues/17917))
* Bump ruff from 0.7.2 to 0.7.3. ([\#17919](https://github.com/element-hq/synapse/issues/17919))
* Bump serde from 1.0.214 to 1.0.215. ([\#17938](https://github.com/element-hq/synapse/issues/17938))
# Synapse 1.119.0 (2024-11-13)
No significant changes since 1.119.0rc2.
### Python 3.8 support dropped
Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.
If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.
# Synapse 1.119.0rc2 (2024-11-11)
Note that due to packaging issues there was no v1.119.0rc1.
### Features
- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374))
- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888))
### Bugfixes
- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809))
- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839))
- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847))
- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850))
- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861))
- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903))
- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915))
### Internal Changes
- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908))
- Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786))
- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890))
- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830))
- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852))
- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884))
- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887))
- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894))
- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905))
- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921))
### Updates to locked dependencies
* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657))
* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901))
* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877))
* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853))
* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898))
* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899))
* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879))
* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874))
* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897))
* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900))
* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857))
* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855))
* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856))
# Synapse 1.118.0 (2024-10-29)
No significant changes since 1.118.0rc1.

Cargo.lock (generated, 180 lines changed)

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.93"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
[[package]]
name = "arc-swap"
@@ -35,6 +35,12 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "blake2"
version = "0.10.6"
@@ -61,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytes"
version = "1.9.0"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
[[package]]
name = "cfg-if"
@@ -156,9 +162,9 @@ dependencies = [
[[package]]
name = "heck"
version = "0.5.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hex"
@@ -216,6 +222,16 @@ version = "0.2.154"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.22"
@@ -249,6 +265,29 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "parking_lot"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets",
]
[[package]]
name = "portable-atomic"
version = "1.6.0"
@@ -272,16 +311,16 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.23.2"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f54b3d09cbdd1f8c20650b28e7b09e338881482f4aa908a5f61a00c98fba2690"
checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"once_cell",
"parking_lot",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -291,9 +330,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.23.2"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3015cf985888fe66cfb63ce0e321c603706cd541b7aec7ddd35c281390af45d8"
checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50"
dependencies = [
"once_cell",
"target-lexicon",
@@ -301,9 +340,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.23.2"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fca7cd8fd809b5ac4eefb89c1f98f7a7651d3739dfb341ca6980090f554c270"
checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403"
dependencies = [
"libc",
"pyo3-build-config",
@@ -311,9 +350,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.12.0"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3eb421dc86d38d08e04b927b02424db480be71b777fa3a56f32e2f2a3a1a3b08"
checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c"
dependencies = [
"arc-swap",
"log",
@@ -322,9 +361,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.23.2"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34e657fa5379a79151b6ff5328d9216a84f55dc93b17b08e7c3609a969b73aa0"
checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -334,9 +373,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
version = "0.23.2"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "295548d5ffd95fd1981d2d3cf4458831b21d60af046b729b6fd143b0ba7aee2f"
checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c"
dependencies = [
"heck",
"proc-macro2",
@@ -347,9 +386,9 @@ dependencies = [
[[package]]
name = "pythonize"
version = "0.23.0"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff"
checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c"
dependencies = [
"pyo3",
"serde",
@@ -394,6 +433,15 @@ dependencies = [
"getrandom",
]
[[package]]
name = "redox_syscall"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.11.1"
@@ -430,19 +478,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "serde"
version = "1.0.215"
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.213"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.215"
version = "1.0.213"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5"
dependencies = [
"proc-macro2",
"quote",
@@ -451,9 +505,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.133"
version = "1.0.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
dependencies = [
"itoa",
"memchr",
@@ -483,6 +537,12 @@ dependencies = [
"digest",
]
[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "subtle"
version = "2.5.0"
@@ -634,3 +694,67 @@ dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "windows-targets"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"


@@ -1,10 +1,8 @@
# A build script for poetry that adds the rust extension.
import itertools
import os
from typing import Any, Dict
from packaging.specifiers import SpecifierSet
from setuptools_rust import Binding, RustExtension
@@ -16,8 +14,6 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
# This flag is a no-op in the latest versions. Instead, we need to
# specify this in the `bdist_wheel` config below.
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
@@ -25,18 +21,3 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False
# We lookup the minimum supported python version by looking at
# `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first python
# version that matches. We then convert that into the `py_limited_api` form,
# e.g. cp39 for python 3.9.
py_limited_api: str
python_bounds = SpecifierSet(setup_kwargs["python_requires"])
for minor_version in itertools.count(start=8):
if f"3.{minor_version}.0" in python_bounds:
py_limited_api = f"cp3{minor_version}"
break
setup_kwargs.setdefault("options", {}).setdefault("bdist_wheel", {})[
"py_limited_api"
] = py_limited_api
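The block removed above derives the `py_limited_api` tag by probing `python_requires` for the first CPython minor version it admits. A self-contained sketch of the same idea, assuming the `packaging` library is installed:

```python
import itertools

from packaging.specifiers import SpecifierSet


def min_abi_tag(python_requires: str) -> str:
    # Find the first 3.x minor version allowed by the specifier,
    # e.g. ">=3.9.0,<4.0.0" -> "cp39", the tag abi3 wheels are built against.
    bounds = SpecifierSet(python_requires)
    for minor in itertools.count(start=8):
        if f"3.{minor}.0" in bounds:
            return f"cp3{minor}"
    raise AssertionError("unreachable: itertools.count never ends")


print(min_abi_tag(">=3.9.0,<4.0.0"))  # cp39
```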


@@ -0,0 +1 @@
Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API.

changelog.d/17786.misc (new file, 1 line)

@@ -0,0 +1 @@
Add a test for downloading and thumbnailing a CMYK JPEG.

changelog.d/17813.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17814.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17815.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17816.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17817.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17818.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17830.misc (new file, 1 line)

@@ -0,0 +1 @@
Include the destination in the error of 'Destination mismatch' on federation requests.

changelog.d/17839.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi.

changelog.d/17847.bugfix (new file, 2 lines)

@@ -0,0 +1,2 @@
Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`.

changelog.d/17861.bugfix (new file, 1 line)

@@ -0,0 +1 @@
Fix detection when the built Rust library was outdated when using source installations.

changelog.d/17884.misc (new file, 1 line)

@@ -0,0 +1 @@
Minor speed-up of sliding sync by computing extensions results in parallel.

changelog.d/17894.misc (new file, 1 line)

@@ -0,0 +1 @@
Remove usage of internal header encoding API.


@@ -1 +0,0 @@
Support stable account suspension from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823).


@@ -1 +0,0 @@
Add `RoomID` & `EventID` rust types.


@@ -1 +0,0 @@
Bump mypy from 1.11.2 to 1.12.1.


@@ -30,6 +30,3 @@ docker-compose up -d
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see
https://github.com/element-hq/element-docker-demo**


@@ -8,9 +8,6 @@ All examples and snippets assume that your Synapse service is called `synapse` i
An example Docker Compose file can be found [here](docker-compose.yaml).
**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this
docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo**
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.

debian/changelog (vendored, 48 lines changed)

@@ -1,51 +1,3 @@
matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium
* New Synapse release 1.121.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 04 Dec 2024 14:47:23 +0000
matrix-synapse-py3 (1.120.2) stable; urgency=medium
* New synapse release 1.120.2.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 15:43:37 +0000
matrix-synapse-py3 (1.120.1) stable; urgency=medium
* New synapse release 1.120.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 09:07:57 +0000
matrix-synapse-py3 (1.120.0) stable; urgency=medium
* New synapse release 1.120.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2024 13:10:23 +0000
matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium
* New Synapse release 1.120.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Nov 2024 15:02:21 +0000
matrix-synapse-py3 (1.119.0) stable; urgency=medium
* New Synapse release 1.119.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000
matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
* New Synapse release 1.119.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000
matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
* New Synapse release 1.119.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700
matrix-synapse-py3 (1.118.0) stable; urgency=medium
* New Synapse release 1.118.0.


@@ -20,7 +20,7 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs
# in `poetry export` in the past.
ARG PYTHON_VERSION=3.12
ARG PYTHON_VERSION=3.11
###
### Stage 0: generate requirements.txt


@@ -7,7 +7,6 @@
#}
## Server ##
public_baseurl: http://127.0.0.1:8008/
report_stats: False
trusted_key_servers: []
enable_registration: true
@@ -105,16 +104,6 @@ experimental_features:
msc3967_enabled: true
# Expose a room summary for public rooms
msc3266_enabled: true
# Send to-device messages to application services
msc2409_to_device_messages_enabled: true
# Allow application services to masquerade devices
msc3202_device_masquerading: true
# Sending device list changes, one-time key counts and fallback key usage to application services
msc3202_transaction_extensions: true
# Proxy OTK claim requests to exclusive ASes
msc3983_appservice_otk_claims: true
# Proxy key queries to exclusive ASes
msc3984_appservice_key_query: true
server_notices:
system_mxid_localpart: _server


@@ -42,6 +42,6 @@ server {
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host:$server_port;
proxy_set_header Host $host;
}
}


@@ -54,7 +54,6 @@
- [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
- [Backups](usage/administration/backups.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)


@@ -5,7 +5,6 @@ basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).


@@ -55,8 +55,7 @@ It returns a JSON body like the following:
}
],
"user_type": null,
"locked": false,
"suspended": false
"locked": false
}
```


@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)


@@ -336,36 +336,6 @@ but it has a `response_types_supported` which excludes "code" (which we rely on,
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### Forgejo
Forgejo is a fork of Gitea that can act as an OAuth2 provider.
The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`.
Synapse config:
```yaml
oidc_providers:
- idp_id: forgejo
idp_name: Forgejo
discover: false
issuer: "https://your-forgejo.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: ["openid", "profile", "email", "groups"]
authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize"
token_endpoint: "https://your-forgejo.com/login/oauth/access_token"
userinfo_endpoint: "https://your-forgejo.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "sub"
picture_claim: "picture"
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but


@@ -100,10 +100,6 @@ database:
keepalives_count: 3
```
## Backups
Don't forget to [back up](./usage/administration/backups.md#database) your database!
## Tuning Postgres
The default settings should be fine for most deployments. For larger


@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.9 or later, up to Python 3.13.
- Python 3.8 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are
@@ -656,10 +656,6 @@ This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.
### Backups
Don't forget to take [backups](../usage/administration/backups.md) of your new server!
### Troubleshooting Installation
`pip` seems to leak *lots* of memory during installation. For instance, a Linux


@@ -117,51 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.120.0
## Removal of experimental MSC3886 feature
[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
has been closed (and will not enter the Matrix spec). As such, we are
removing the experimental support for it in this release.
The `experimental_features.msc3886_endpoint` configuration option has
been removed.
## Authenticated media is now enforced by default
The [`enable_authenticated_media`] configuration option now defaults to true.
This means that clients and remote (federated) homeservers now need to use
the authenticated media endpoints in order to download media from your
homeserver.
As an exception, existing media that was stored on the server prior to
this option changing to `true` will still be accessible over the
unauthenticated endpoints.
The matrix.org homeserver has already been running with this option enabled
since September 2024, so most common clients and homeservers should already
be compatible.
With that said, administrators who wish to disable this feature for broader
compatibility can still do so by manually configuring
`enable_authenticated_media: False`.
[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media
# Upgrading to v1.119.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).
If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.
# Upgrading to v1.111.0
## New worker endpoints for authenticated client and federation media


@@ -1,125 +0,0 @@
# How to back up a Synapse homeserver
It is critical to maintain good backups of your server, to guard against
hardware failure as well as potential corruption due to bugs or administrator
error.
This page documents the things you will need to consider backing up as part of
a Synapse installation.
## Configuration files
Keep a copy of your configuration file (`homeserver.yaml`), as well as any
auxiliary config files it refers to, such as the
[`log_config`](../configuration/config_documentation.md#log_config) file and any
[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files).
Often, all such config files will be kept in a single directory such as
`/etc/synapse`, which will make this easier.
## Server signing key
Your server has a [signing
key](../configuration/config_documentation.md#signing_key_path) which it uses
to sign events and outgoing federation requests. It is easiest to back it up
with your configuration files, but an alternative is to have Synapse create a
new signing key if you have to restore.
If you do decide to replace the signing key, you should add the old *public*
key to
[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys).
## Database
Synapse's support for SQLite is only suitable for testing purposes, so for the
purposes of this document, we'll assume you are using
[PostgreSQL](../../postgres.md).
A full discussion of backup strategies for PostgreSQL is out of scope for this
document; see the [PostgreSQL
documentation](https://www.postgresql.org/docs/current/backup.html) for
detailed information.
### Synapse-specific details
* Be very careful not to restore into a database that already has tables
present. At best, this will error; at worst, it will lead to subtle database
inconsistencies.
* The `e2e_one_time_keys_json` table should **not** be backed up, or if it is
backed up, should be
[`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html)
after restoring the database before Synapse is started.
[Background: restoring the database to an older backup can cause
used one-time-keys to be re-issued, causing subsequent [message decryption
errors](https://github.com/element-hq/element-meta/issues/2155). Clearing
all one-time-keys from the database ensures that this cannot happen, and
will prompt clients to generate and upload new one-time-keys.]
### Quick and easy database backup and restore
Typically, the easiest solution is to use `pg_dump` to take a copy of the whole
database. We recommend `pg_dump`'s custom dump format, as it produces
significantly smaller backup files.
```shell
sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump
```
There is no need to stop Postgres or Synapse while `pg_dump` is running: it
will take a consistent snapshot of the database.
To restore, you will need to recreate the database as described in [Using
Postgres](../../postgres.md#set-up-database),
then load the dump into it with `pg_restore`:
```shell
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres pg_restore -d synapse < synapse.dump
```
(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember
to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before
starting Synapse.)
To reiterate: do **not** restore a dump over an existing database.
Again, if you plan to run your homeserver at any sort of production level, we
recommend studying the PostgreSQL documentation on backup options.
## Media store
Synapse keeps a copy of media uploaded by users, including avatars and message
attachments, in its [Media
store](../configuration/config_documentation.md#media-store).
It is a directory on the local disk, containing the following directories:
* `local_content`: this is content uploaded by your local users. As a general
rule, you should back this up: it may represent the only copy of those
media files anywhere in the federation, and if they are lost, users will
see errors when viewing user or room avatars, and messages with attachments.
* `local_thumbnails`: "thumbnails" of images uploaded by your users. If
[`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails)
is enabled, these will be regenerated if they are removed from the disk, and
there is therefore no need to back them up.
If `dynamic_thumbnails` is *not* enabled (the default): although this can
theoretically be regenerated from `local_content`, there is no tooling to do
so. We recommend that these are backed up too.
* `remote_content`: this is a cache of content that was uploaded by a user on
another server, and has since been requested by a user on your own server.
Typically there is no need to back up this directory: if a file in this directory
is removed, Synapse will attempt to fetch it again from the remote
server.
* `remote_thumbnails`: thumbnails of images uploaded by users on other
servers. As with `remote_content`, there is normally no need to back this
up.
* `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded
by the [URL previews](../../setup/installation.md#url-previews) feature.
These do not need to be backed up.


@@ -1887,33 +1887,12 @@ Config options related to Synapse's media store.
When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.
In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this
case-by-case breakdown describes whether media downloads are permitted:
* `enable_authenticated_media = False`:
* unauthenticated client or homeserver requesting local media: allowed
* unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache,
or as long as the remote homeserver does not require authentication to retrieve the media
* `enable_authenticated_media = True`:
* unauthenticated client or homeserver requesting local media:
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
* unauthenticated client or homeserver requesting remote media: the same as for local media;
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`,
will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`.
This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media;
those older clients or homeservers will not be cut off from media they can already see.
_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
Example configuration:
```yaml
enable_authenticated_media: false
enable_authenticated_media: true
```
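The case-by-case breakdown above reduces to a small predicate. A toy restatement for illustration only (this is not Synapse's actual access-control code, and it glosses over the remote-cache subtleties):

```python
def may_download_unauthenticated(enable_authenticated_media: bool,
                                 stored_while_disabled: bool) -> bool:
    # With the option off, legacy unauthenticated downloads always work.
    if not enable_authenticated_media:
        return True
    # With it on, only media stored while the option was off (or before it
    # existed) remains reachable over the legacy endpoints.
    return stored_while_disabled
```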
---
### `enable_media_repo`
@@ -3129,15 +3108,6 @@ it was last used.
It is possible to build an entry from an old `signing.key` file using the
`export_signing_key` script which is provided with synapse.
If you have lost the private key file, you can ask another server you trust to
tell you the public keys it has seen from your server. To fetch the keys from
`matrix.org`, try something like:
```
curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
jq '.server_keys | map(.verify_keys) | add'
```
Example configuration:
```yaml
old_signing_keys:
@@ -4401,9 +4371,9 @@ It is possible to scale the processes that handle sending outbound federation re
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `federation_sender_instances` map. Doing so will remove handling of this function from
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.
The way that the load balancing works is that any outbound federation request will be assigned
to a federation sender worker based on the hash of the destination server name. This
means that all requests being sent to the same destination will be processed by the same
worker instance. Multiple `federation_sender_instances` are useful if there is a federation
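A minimal sketch of such hash-based assignment, using a stable digest of the destination server name (names are illustrative, not Synapse's actual code):

```python
import hashlib

def pick_federation_sender(destination: str, instances: list[str]) -> str:
    # A stable digest (rather than Python's randomised hash()) guarantees the
    # same destination always maps to the same worker across processes.
    digest = hashlib.sha256(destination.encode("utf-8")).digest()
    return instances[int.from_bytes(digest[:8], "big") % len(instances)]

workers = ["federation_sender1", "federation_sender2"]
# Every request for the same destination lands on the same worker:
assert pick_federation_sender("matrix.org", workers) == pick_federation_sender("matrix.org", workers)
```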
@@ -4760,7 +4730,7 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match
the "worker_name". If not set or `null`, invites will be accepted on the
main process.
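A rough sketch of that gating, assuming (hypothetically) that the main process reports no worker name:

```python
from typing import Optional

def should_run_here(worker_to_run_on: Optional[str], current_worker_name: Optional[str]) -> bool:
    # None on both sides means "run on the main process"; otherwise the
    # configured name must match this process's worker_name exactly.
    return worker_to_run_on == current_worker_name
```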

flake.lock generated (56 lines changed)
View File

@@ -56,6 +56,24 @@
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
@@ -168,27 +186,27 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1729265718,
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1728538411,
"narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
"lastModified": 1681358109,
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
"type": "github"
},
"original": {
@@ -231,19 +249,20 @@
"devenv": "devenv",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay",
"systems": "systems_2"
"systems": "systems_3"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1731897198,
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
"lastModified": 1693966243,
"narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
"rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
"type": "github"
},
"original": {
@@ -281,6 +300,21 @@
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View File

@@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
@@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
# availabile to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
@@ -39,9 +39,9 @@
{
inputs = {
# Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
@@ -50,7 +50,7 @@
rust-overlay.url = "github:oxalica/rust-overlay";
};
outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
@@ -82,7 +82,7 @@
#
# NOTE: We currently need to set the Rust version unnecessarily high
# in order to work around https://github.com/matrix-org/synapse/issues/15939
(rust-bin.stable."1.82.0".default.override {
(rust-bin.stable."1.71.1".default.override {
# Additionally install the "rust-src" extension to allow diving into the
# Rust source code in an IDE (rust-analyzer will also make use of it).
extensions = [ "rust-src" ];
@@ -126,7 +126,7 @@
# Automatically activate the poetry virtualenv upon entering the shell.
languages.python.poetry.activate.enable = true;
# Install all extra Python dependencies; this is needed to run the unit
# tests and utilise all Synapse features.
# tests and utilitise all Synapse features.
languages.python.poetry.install.arguments = ["--extras all"];
# Install the 'matrix-synapse' package from the local checkout.
languages.python.poetry.install.installRootPackage = true;
@@ -163,8 +163,8 @@
# Create a postgres user called 'synapse_user' which has ownership
# over the 'synapse' database.
services.postgres.initialScript = ''
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
'';
# Redis is needed in order to run Synapse in worker mode.
@@ -205,7 +205,7 @@
# corresponding Nix packages on https://search.nixos.org/packages.
#
# This was done until `./install-deps.pl --dryrun` produced no output.
env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [
env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
DBI
ClassMethodModifiers
CryptEd25519

View File

@@ -26,7 +26,7 @@ strict_equality = True
# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.9
python_version = 3.8
files =
docker/,
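As an illustration of what this catches: the hypothetical snippet below runs fine on a modern interpreter, but mypy rejects the PEP 604 union annotation under either configured minimum (3.8 or 3.9):

```python
# mypy --python-version 3.8 reports:
#     error: X | Y syntax for unions requires Python 3.10
def lookup(key: str) -> int | None:
    return None
```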

poetry.lock generated (253 lines changed)
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -11,6 +11,9 @@ files = [
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
[[package]]
name = "attrs"
version = "24.2.0"
@@ -104,20 +107,21 @@ typecheck = ["mypy"]
[[package]]
name = "bleach"
version = "6.2.0"
version = "6.1.0"
description = "An easy safelist-based HTML-sanitizing tool."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
files = [
{file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
{file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
{file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
{file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
]
[package.dependencies]
six = ">=1.9.0"
webencodings = "*"
[package.extras]
css = ["tinycss2 (>=1.1.0,<1.5)"]
css = ["tinycss2 (>=1.1.0,<1.3)"]
[[package]]
name = "canonicaljson"
@@ -724,13 +728,13 @@ files = [
[[package]]
name = "immutabledict"
version = "4.2.1"
version = "4.2.0"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.8,<4.0"
files = [
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
{file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"},
{file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"},
]
[[package]]
@@ -870,7 +874,9 @@ files = [
[package.dependencies]
attrs = ">=22.2.0"
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
jsonschema-specifications = ">=2023.03.6"
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
@@ -890,6 +896,7 @@ files = [
]
[package.dependencies]
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
referencing = ">=0.28.0"
[[package]]
@@ -905,6 +912,7 @@ files = [
[package.dependencies]
importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
"jaraco.classes" = "*"
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
@@ -1314,43 +1322,38 @@ files = [
[[package]]
name = "mypy"
version = "1.12.1"
version = "1.11.2"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3d7d4371829184e22fda4015278fbfdef0327a4b955a483012bd2d423a788801"},
{file = "mypy-1.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f59f1dfbf497d473201356966e353ef09d4daec48caeacc0254db8ef633a28a5"},
{file = "mypy-1.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b947097fae68004b8328c55161ac9db7d3566abfef72d9d41b47a021c2fba6b1"},
{file = "mypy-1.12.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:96af62050971c5241afb4701c15189ea9507db89ad07794a4ee7b4e092dc0627"},
{file = "mypy-1.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:d90da248f4c2dba6c44ddcfea94bb361e491962f05f41990ff24dbd09969ce20"},
{file = "mypy-1.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1230048fec1380faf240be6385e709c8570604d2d27ec6ca7e573e3bc09c3735"},
{file = "mypy-1.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:02dcfe270c6ea13338210908f8cadc8d31af0f04cee8ca996438fe6a97b4ec66"},
{file = "mypy-1.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5a437c9102a6a252d9e3a63edc191a3aed5f2fcb786d614722ee3f4472e33f6"},
{file = "mypy-1.12.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:186e0c8346efc027ee1f9acf5ca734425fc4f7dc2b60144f0fbe27cc19dc7931"},
{file = "mypy-1.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:673ba1140a478b50e6d265c03391702fa11a5c5aff3f54d69a62a48da32cb811"},
{file = "mypy-1.12.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9fb83a7be97c498176fb7486cafbb81decccaef1ac339d837c377b0ce3743a7f"},
{file = "mypy-1.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:389e307e333879c571029d5b93932cf838b811d3f5395ed1ad05086b52148fb0"},
{file = "mypy-1.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94b2048a95a21f7a9ebc9fbd075a4fcd310410d078aa0228dbbad7f71335e042"},
{file = "mypy-1.12.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ee5932370ccf7ebf83f79d1c157a5929d7ea36313027b0d70a488493dc1b179"},
{file = "mypy-1.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:19bf51f87a295e7ab2894f1d8167622b063492d754e69c3c2fed6563268cb42a"},
{file = "mypy-1.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d34167d43613ffb1d6c6cdc0cc043bb106cac0aa5d6a4171f77ab92a3c758bcc"},
{file = "mypy-1.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:427878aa54f2e2c5d8db31fa9010c599ed9f994b3b49e64ae9cd9990c40bd635"},
{file = "mypy-1.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5fcde63ea2c9f69d6be859a1e6dd35955e87fa81de95bc240143cf00de1f7f81"},
{file = "mypy-1.12.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d54d840f6c052929f4a3d2aab2066af0f45a020b085fe0e40d4583db52aab4e4"},
{file = "mypy-1.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:20db6eb1ca3d1de8ece00033b12f793f1ea9da767334b7e8c626a4872090cf02"},
{file = "mypy-1.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b16fe09f9c741d85a2e3b14a5257a27a4f4886c171d562bc5a5e90d8591906b8"},
{file = "mypy-1.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0dcc1e843d58f444fce19da4cce5bd35c282d4bde232acdeca8279523087088a"},
{file = "mypy-1.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e10ba7de5c616e44ad21005fa13450cd0de7caaa303a626147d45307492e4f2d"},
{file = "mypy-1.12.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e6fe449223fa59fbee351db32283838a8fee8059e0028e9e6494a03802b4004"},
{file = "mypy-1.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:dc6e2a2195a290a7fd5bac3e60b586d77fc88e986eba7feced8b778c373f9afe"},
{file = "mypy-1.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:de5b2a8988b4e1269a98beaf0e7cc71b510d050dce80c343b53b4955fff45f19"},
{file = "mypy-1.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:843826966f1d65925e8b50d2b483065c51fc16dc5d72647e0236aae51dc8d77e"},
{file = "mypy-1.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9fe20f89da41a95e14c34b1ddb09c80262edcc295ad891f22cc4b60013e8f78d"},
{file = "mypy-1.12.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8135ffec02121a75f75dc97c81af7c14aa4ae0dda277132cfcd6abcd21551bfd"},
{file = "mypy-1.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:a7b76fa83260824300cc4834a3ab93180db19876bce59af921467fd03e692810"},
{file = "mypy-1.12.1-py3-none-any.whl", hash = "sha256:ce561a09e3bb9863ab77edf29ae3a50e65685ad74bba1431278185b7e5d5486e"},
{file = "mypy-1.12.1.tar.gz", hash = "sha256:f5b3936f7a6d0e8280c9bdef94c7ce4847f5cdfc258fbb2c29a8c1711e8bb96d"},
{file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"},
{file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"},
{file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"},
{file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"},
{file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"},
{file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"},
{file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"},
{file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"},
{file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"},
{file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"},
{file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"},
{file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"},
{file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"},
{file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"},
{file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"},
{file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"},
{file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"},
{file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"},
{file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"},
{file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"},
{file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"},
{file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"},
{file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"},
{file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"},
{file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"},
{file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"},
{file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"},
]
[package.dependencies]
@@ -1377,17 +1380,17 @@ files = [
[[package]]
name = "mypy-zope"
version = "1.0.8"
version = "1.0.7"
description = "Plugin for mypy to support zope interfaces"
optional = false
python-versions = "*"
files = [
{file = "mypy_zope-1.0.8-py3-none-any.whl", hash = "sha256:8794a77dae0c7e2f28b8ac48569091310b3ee45bb9d6cd4797dcb837c40f9976"},
{file = "mypy_zope-1.0.8.tar.gz", hash = "sha256:854303a95aefc4289e8a0796808e002c2c7ecde0a10a8f7b8f48092f94ef9b9f"},
{file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
{file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
]
[package.dependencies]
mypy = ">=1.0.0,<1.13.0"
mypy = ">=1.0.0,<1.12.0"
"zope.interface" = "*"
"zope.schema" = "*"
@@ -1423,13 +1426,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
version = "24.2"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
@@ -1448,13 +1451,13 @@ dev = ["jinja2"]
[[package]]
name = "phonenumbers"
version = "8.13.50"
version = "8.13.48"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
{file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"},
{file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"},
{file = "phonenumbers-8.13.48-py2.py3-none-any.whl", hash = "sha256:5c51939acefa390eb74119750afb10a85d3c628dc83fd62c52d6f532fcf5d205"},
{file = "phonenumbers-8.13.48.tar.gz", hash = "sha256:62d8df9b0f3c3c41571c6b396f044ddd999d61631534001b8be7fdf7ba1b18f3"},
]
[[package]]
@@ -1568,6 +1571,17 @@ files = [
[package.extras]
testing = ["pytest", "pytest-cov"]
[[package]]
name = "pkgutil-resolve-name"
version = "1.3.10"
description = "Resolve a name to an object."
optional = false
python-versions = ">=3.6"
files = [
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
]
[[package]]
name = "prometheus-client"
version = "0.21.0"
@@ -1789,13 +1803,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygithub"
version = "2.5.0"
version = "2.4.0"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
{file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"},
{file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"},
]
[package.dependencies]
@@ -1922,23 +1936,24 @@ test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"]
[[package]]
name = "pysaml2"
version = "7.5.0"
version = "7.3.1"
description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
python-versions = ">=3.6.2,<4.0.0"
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
{file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"},
{file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"},
]
[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyopenssl = "*"
python-dateutil = "*"
pytz = "*"
requests = ">=2,<3"
xmlschema = ">=2,<3"
xmlschema = ">=1.2.1"
[package.extras]
s2repoze = ["paste", "repoze.who", "zope.interface"]
@@ -1959,13 +1974,13 @@ six = ">=1.5"
[[package]]
name = "python-multipart"
version = "0.0.18"
version = "0.0.16"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "python_multipart-0.0.18-py3-none-any.whl", hash = "sha256:efe91480f485f6a361427a541db4796f9e1591afc0fb8e7a4ba06bfbc6708996"},
{file = "python_multipart-0.0.18.tar.gz", hash = "sha256:7a68db60c8bfb82e460637fa4750727b45af1d5e2ed215593f917f64694d34fe"},
{file = "python_multipart-0.0.16-py3-none-any.whl", hash = "sha256:c2759b7b976ef3937214dfb592446b59dfaa5f04682a076f78b117c94776d87a"},
{file = "python_multipart-0.0.16.tar.gz", hash = "sha256:8dee37b88dab9b59922ca173c35acb627cc12ec74019f5cd4578369c6df36554"},
]
[[package]]
@@ -2149,6 +2164,7 @@ files = [
[package.dependencies]
markdown-it-py = ">=2.2.0,<3.0.0"
pygments = ">=2.13.0,<3.0.0"
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]
@@ -2261,29 +2277,29 @@ files = [
[[package]]
name = "ruff"
version = "0.7.3"
version = "0.7.1"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
{file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"},
{file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"},
{file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"},
{file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"},
{file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"},
{file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"},
{file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"},
]
[[package]]
@@ -2410,18 +2426,19 @@ test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata
[[package]]
name = "setuptools-rust"
version = "1.10.2"
version = "1.8.1"
description = "Setuptools Rust extension plugin"
optional = false
python-versions = ">=3.8"
files = [
{file = "setuptools_rust-1.10.2-py3-none-any.whl", hash = "sha256:4b39c435ae9670315d522ed08fa0e8cb29f2a6048033966b6be2571a90ce4f1c"},
{file = "setuptools_rust-1.10.2.tar.gz", hash = "sha256:5d73e7eee5f87a6417285b617c97088a7c20d1a70fcea60e3bdc94ff567c29dc"},
{file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"},
{file = "setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"},
]
[package.dependencies]
semantic-version = ">=2.8.2,<3"
setuptools = ">=62.4"
tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""}
[[package]]
name = "signedjson"
@@ -2519,63 +2536,33 @@ twisted = ["twisted"]
[[package]]
name = "tomli"
version = "2.2.1"
version = "2.0.2"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
{file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
{file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
{file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
{file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
{file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
{file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
{file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
{file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
{file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
{file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
{file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
{file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]
[[package]]
name = "tornado"
version = "6.4.2"
version = "6.4.1"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = true
python-versions = ">=3.8"
files = [
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"},
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"},
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"},
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"},
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"},
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"},
{file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"},
{file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"},
{file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"},
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
{file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
{file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
{file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
]
[[package]]
@@ -3134,5 +3121,5 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9.0"
content-hash = "170c8bc97d4cd16a38a98ed9d48bef0ef09c994d8b129160ef262306cf689db7"
python-versions = "^3.8.0"
content-hash = "aa1f6d97809596c23a6d160c0c5804971dad0ba49e34b137bbfb79df038fe6f0"

View File

@@ -36,7 +36,7 @@
[tool.ruff]
line-length = 88
target-version = "py39"
target-version = "py38"
[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.121.0rc1"
version = "1.118.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.9.0"
python = "^3.8.0"
# Mandatory Dependencies
# ----------------------
@@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=5.3"
PyYAML = ">=3.13"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
@@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.5.2", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.7.3"
ruff = "0.7.1"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"]
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@@ -378,19 +378,16 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6, 3.7 and 3.8: EOLed
# - PyPy 3.7 and 3.8: we only support Python 3.9+
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler.
#
# We temporarily pin Rust to 1.82.0 to work around
# https://github.com/element-hq/synapse/issues/17988
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
# For some reason if we don't manually clean the build directory we

View File

@@ -30,14 +30,14 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
pyo3 = { version = "0.23.2", features = [
pyo3 = { version = "0.21.0", features = [
"macros",
"anyhow",
"abi3",
"abi3-py38",
] }
pyo3-log = "0.12.0"
pythonize = "0.23.0"
pyo3-log = "0.10.0"
pythonize = "0.21.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }

View File

@@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType};
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "acl")?;
let child_module = PyModule::new_bound(py, "acl")?;
child_module.add_class::<ServerAclEvaluator>()?;
m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import acl` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.acl", child_module)?;

View File

@@ -1,107 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
use std::collections::HashMap;
use pyo3::{exceptions::PyValueError, pyfunction, PyResult};
use crate::{
identifier::UserID,
matrix_const::{
HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN,
},
};
#[pyfunction(name = "event_visible_to_server")]
pub fn event_visible_to_server_py(
sender: String,
target_server_name: String,
history_visibility: String,
erased_senders: HashMap<String, bool>,
partial_state_invisible: bool,
memberships: Vec<(String, String)>, // (state_key, membership)
) -> PyResult<bool> {
event_visible_to_server(
sender,
target_server_name,
history_visibility,
erased_senders,
partial_state_invisible,
memberships,
)
.map_err(|e| PyValueError::new_err(format!("{e}")))
}
/// Return whether the target server is allowed to see the event.
///
/// For a fully stated room, the target server is allowed to see an event E if:
/// - the state at E has world readable or shared history vis, OR
/// - the state at E says that the target server is in the room.
///
/// For a partially stated room, the target server is allowed to see E if:
/// - E was created by this homeserver, AND:
/// - the partial state at E has world readable or shared history vis, OR
/// - the partial state at E says that the target server is in the room.
pub fn event_visible_to_server(
sender: String,
target_server_name: String,
history_visibility: String,
erased_senders: HashMap<String, bool>,
partial_state_invisible: bool,
memberships: Vec<(String, String)>, // (state_key, membership)
) -> anyhow::Result<bool> {
if let Some(&erased) = erased_senders.get(&sender) {
if erased {
return Ok(false);
}
}
if partial_state_invisible {
return Ok(false);
}
if history_visibility != HISTORY_VISIBILITY_INVITED
&& history_visibility != HISTORY_VISIBILITY_JOINED
{
return Ok(true);
}
let mut visible = false;
for (state_key, membership) in memberships {
let state_key = UserID::try_from(state_key.as_ref())
.map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?;
if state_key.server_name() != target_server_name {
return Err(anyhow::anyhow!(
"state_key.server_name ({}) does not match target_server_name ({target_server_name})",
state_key.server_name()
));
}
match membership.as_str() {
MEMBERSHIP_INVITE => {
if history_visibility == HISTORY_VISIBILITY_INVITED {
visible = true;
break;
}
}
MEMBERSHIP_JOIN => {
visible = true;
break;
}
_ => continue,
}
}
Ok(visible)
}

View File

@@ -41,11 +41,9 @@ use pyo3::{
pybacked::PyBackedStr,
pyclass, pymethods,
types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
Bound, IntoPyObject, PyAny, PyObject, PyResult, Python,
Bound, IntoPy, PyAny, PyObject, PyResult, Python,
};
use crate::UnwrapInfallible;
/// Definitions of the various fields of the internal metadata.
#[derive(Clone)]
enum EventInternalMetadataData {
@@ -62,59 +60,31 @@ enum EventInternalMetadataData {
impl EventInternalMetadataData {
/// Convert the field to its name and python object.
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) {
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
match self {
EventInternalMetadataData::OutOfBandMembership(o) => (
pyo3::intern!(py, "out_of_band_membership"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::SendOnBehalfOf(o) => (
pyo3::intern!(py, "send_on_behalf_of"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::RecheckRedaction(o) => (
pyo3::intern!(py, "recheck_redaction"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::SoftFailed(o) => (
pyo3::intern!(py, "soft_failed"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::ProactivelySend(o) => (
pyo3::intern!(py, "proactively_send"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::Redacted(o) => (
pyo3::intern!(py, "redacted"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::TxnId(o) => (
pyo3::intern!(py, "txn_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::TokenId(o) => (
pyo3::intern!(py, "token_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::DeviceId(o) => (
pyo3::intern!(py, "device_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::OutOfBandMembership(o) => {
(pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
}
EventInternalMetadataData::SendOnBehalfOf(o) => {
(pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py))
}
EventInternalMetadataData::RecheckRedaction(o) => {
(pyo3::intern!(py, "recheck_redaction"), o.into_py(py))
}
EventInternalMetadataData::SoftFailed(o) => {
(pyo3::intern!(py, "soft_failed"), o.into_py(py))
}
EventInternalMetadataData::ProactivelySend(o) => {
(pyo3::intern!(py, "proactively_send"), o.into_py(py))
}
EventInternalMetadataData::Redacted(o) => {
(pyo3::intern!(py, "redacted"), o.into_py(py))
}
EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)),
EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)),
EventInternalMetadataData::DeviceId(o) => {
(pyo3::intern!(py, "device_id"), o.into_py(py))
}
}
}
@@ -277,7 +247,7 @@ impl EventInternalMetadata {
///
/// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
let dict = PyDict::new(py);
let dict = PyDict::new_bound(py);
for entry in &self.data {
let (key, value) = entry.to_python_pair(py);

View File

@@ -22,23 +22,21 @@
use pyo3::{
types::{PyAnyMethods, PyModule, PyModuleMethods},
wrap_pyfunction, Bound, PyResult, Python,
Bound, PyResult, Python,
};
pub mod filter;
mod internal_metadata;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "events")?;
let child_module = PyModule::new_bound(py, "events")?;
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?;
m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import events` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.events", child_module)?;

View File

@@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request
let headers_iter = request
.getattr("requestHeaders")?
.call_method0("getAllRawHeaders")?
.try_iter()?;
.iter()?;
for header in headers_iter {
let header = header?;

View File

@@ -1,252 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
//! # Matrix Identifiers
//!
//! This module contains definitions and utilities for working with matrix identifiers.
use std::{fmt, ops::Deref};
/// Errors that can occur when parsing a matrix identifier.
#[derive(Clone, Debug, PartialEq)]
pub enum IdentifierError {
IncorrectSigil,
MissingColon,
}
impl fmt::Display for IdentifierError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// A Matrix user_id.
#[derive(Clone, Debug, PartialEq)]
pub struct UserID(String);
impl UserID {
/// Returns the `localpart` of the user_id.
pub fn localpart(&self) -> &str {
&self[1..self.colon_pos()]
}
/// Returns the `server_name` / `domain` of the user_id.
pub fn server_name(&self) -> &str {
&self[self.colon_pos() + 1..]
}
/// Returns the position of the ':' inside of the user_id.
/// Used when splitting the user_id into its respective parts.
fn colon_pos(&self) -> usize {
self.find(':').unwrap()
}
}
impl TryFrom<&str> for UserID {
type Error = IdentifierError;
/// Will try creating a `UserID` from the provided `&str`.
/// Can fail if the user_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('@') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(UserID(s.to_string()))
}
}
impl TryFrom<String> for UserID {
type Error = IdentifierError;
/// Will try creating a `UserID` from the provided `String`.
/// Can fail if the user_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('@') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(UserID(s))
}
}
impl<'de> serde::Deserialize<'de> for UserID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
UserID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for UserID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for UserID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// A Matrix room_id.
#[derive(Clone, Debug, PartialEq)]
pub struct RoomID(String);
impl RoomID {
/// Returns the `localpart` of the room_id.
pub fn localpart(&self) -> &str {
&self[1..self.colon_pos()]
}
/// Returns the `server_name` / `domain` of the room_id.
pub fn server_name(&self) -> &str {
&self[self.colon_pos() + 1..]
}
/// Returns the position of the ':' inside of the room_id.
/// Used when splitting the room_id into its respective parts.
fn colon_pos(&self) -> usize {
self.find(':').unwrap()
}
}
impl TryFrom<&str> for RoomID {
type Error = IdentifierError;
/// Will try creating a `RoomID` from the provided `&str`.
/// Can fail if the room_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('!') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(RoomID(s.to_string()))
}
}
impl TryFrom<String> for RoomID {
type Error = IdentifierError;
/// Will try creating a `RoomID` from the provided `String`.
/// Can fail if the room_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('!') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(RoomID(s))
}
}
impl<'de> serde::Deserialize<'de> for RoomID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
RoomID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for RoomID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for RoomID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// A Matrix event_id.
#[derive(Clone, Debug, PartialEq)]
pub struct EventID(String);
impl TryFrom<&str> for EventID {
type Error = IdentifierError;
    /// Will try creating an `EventID` from the provided `&str`.
/// Can fail if the event_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('$') {
return Err(IdentifierError::IncorrectSigil);
}
Ok(EventID(s.to_string()))
}
}
impl TryFrom<String> for EventID {
type Error = IdentifierError;
    /// Will try creating an `EventID` from the provided `String`.
/// Can fail if the event_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('$') {
return Err(IdentifierError::IncorrectSigil);
}
Ok(EventID(s))
}
}
impl<'de> serde::Deserialize<'de> for EventID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
EventID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for EventID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for EventID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
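
A minimal usage sketch of the identifier types above. The `example` function
and the identifiers inside it are hypothetical, shown only to illustrate the
intended API:

fn example() -> Result<(), IdentifierError> {
    // Parsing validates the sigil and the presence of a colon.
    let user = UserID::try_from("@alice:example.org")?;
    assert_eq!(user.localpart(), "alice");
    assert_eq!(user.server_name(), "example.org");

    // Malformed identifiers are rejected at construction time.
    assert_eq!(
        UserID::try_from("alice:example.org"),
        Err(IdentifierError::IncorrectSigil)
    );
    assert_eq!(UserID::try_from("@alice"), Err(IdentifierError::MissingColon));
    Ok(())
}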

View File

@@ -1,5 +1,3 @@
use std::convert::Infallible;
use lazy_static::lazy_static;
use pyo3::prelude::*;
use pyo3_log::ResetHandle;
@@ -8,8 +6,6 @@ pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
pub mod identifier;
pub mod matrix_const;
pub mod push;
pub mod rendezvous;
@@ -54,16 +50,3 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
Ok(())
}
pub trait UnwrapInfallible<T> {
fn unwrap_infallible(self) -> T;
}
impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
fn unwrap_infallible(self) -> T {
match self {
Ok(val) => val,
Err(never) => match never {},
}
}
}
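
A short sketch of what `UnwrapInfallible` buys: conversions whose error type
is `Infallible` still return a `Result`, and this helper discharges the `Err`
arm without the panic path a bare `unwrap()` would imply. The value below is
made up for illustration:

fn demo() {
    // `Infallible` has no values, so the `Err` arm can never be taken.
    let always_ok: Result<u32, std::convert::Infallible> = Ok(42);
    assert_eq!(always_ok.unwrap_infallible(), 42);
}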

View File

@@ -1,28 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
//! # Matrix Constants
//!
//! This module contains definitions for constant values described by the Matrix specification.
pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable";
pub const HISTORY_VISIBILITY_SHARED: &str = "shared";
pub const HISTORY_VISIBILITY_INVITED: &str = "invited";
pub const HISTORY_VISIBILITY_JOINED: &str = "joined";
pub const MEMBERSHIP_BAN: &str = "ban";
pub const MEMBERSHIP_LEAVE: &str = "leave";
pub const MEMBERSHIP_KNOCK: &str = "knock";
pub const MEMBERSHIP_INVITE: &str = "invite";
pub const MEMBERSHIP_JOIN: &str = "join";

View File

@@ -167,7 +167,6 @@ impl PushRuleEvaluator {
///
/// Returns the set of actions, if any, that match (filtering out any
/// `dont_notify` and `coalesce` actions).
#[pyo3(signature = (push_rules, user_id=None, display_name=None))]
pub fn run(
&self,
push_rules: &FilteredPushRules,
@@ -237,7 +236,6 @@ impl PushRuleEvaluator {
}
/// Check if the given condition matches.
#[pyo3(signature = (condition, user_id=None, display_name=None))]
fn matches(
&self,
condition: Condition,

View File

@@ -65,8 +65,8 @@ use anyhow::{Context, Error};
use log::warn;
use pyo3::exceptions::PyTypeError;
use pyo3::prelude::*;
use pyo3::types::{PyBool, PyInt, PyList, PyString};
use pythonize::{depythonize, pythonize, PythonizeError};
use pyo3::types::{PyBool, PyList, PyLong, PyString};
use pythonize::{depythonize_bound, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -79,7 +79,7 @@ pub mod utils;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "push")?;
let child_module = PyModule::new_bound(py, "push")?;
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
@@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.push", child_module)?;
@@ -182,16 +182,12 @@ pub enum Action {
Unknown(Value),
}
impl<'py> IntoPyObject<'py> for Action {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PythonizeError;
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
impl IntoPy<PyObject> for Action {
fn into_py(self, py: Python<'_>) -> PyObject {
// When we pass the `Action` struct to Python we want it to be converted
// to a dict. We use `pythonize`, which converts the struct using the
// `serde` serialization.
pythonize(py, &self)
pythonize(py, &self).expect("valid action")
}
}
@@ -274,13 +270,13 @@ pub enum SimpleJsonValue {
}
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = ob.downcast::<PyString>() {
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = ob.downcast::<PyBool>() {
Ok(SimpleJsonValue::Bool(b.extract()?))
} else if let Ok(i) = ob.downcast::<PyInt>() {
} else if let Ok(i) = ob.downcast::<PyLong>() {
Ok(SimpleJsonValue::Int(i.extract()?))
} else if ob.is_none() {
Ok(SimpleJsonValue::Null)
@@ -302,19 +298,15 @@ pub enum JsonValue {
}
impl<'source> FromPyObject<'source> for JsonValue {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(l) = ob.downcast::<PyList>() {
match l
.iter()
.map(|it| SimpleJsonValue::extract_bound(&it))
.collect()
{
match l.iter().map(SimpleJsonValue::extract).collect() {
Ok(a) => Ok(JsonValue::Array(a)),
Err(e) => Err(PyTypeError::new_err(format!(
"Can't convert to JsonValue::Array: {e}"
))),
}
} else if let Ok(v) = SimpleJsonValue::extract_bound(ob) {
} else if let Ok(v) = SimpleJsonValue::extract(ob) {
Ok(JsonValue::Value(v))
} else {
Err(PyTypeError::new_err(format!(
@@ -371,19 +363,15 @@ pub enum KnownCondition {
},
}
impl<'source> IntoPyObject<'source> for Condition {
type Target = PyAny;
type Output = Bound<'source, Self::Target>;
type Error = PythonizeError;
fn into_pyobject(self, py: Python<'source>) -> Result<Self::Output, Self::Error> {
pythonize(py, &self)
impl IntoPy<PyObject> for Condition {
fn into_py(self, py: Python<'_>) -> PyObject {
pythonize(py, &self).expect("valid condition")
}
}
impl<'source> FromPyObject<'source> for Condition {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
Ok(depythonize(ob)?)
Ok(depythonize_bound(ob.clone())?)
}
}

View File

@@ -23,6 +23,7 @@ use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
use regex;
use regex::Regex;
use regex::RegexBuilder;

View File

@@ -29,7 +29,7 @@ use pyo3::{
exceptions::PyValueError,
pyclass, pymethods,
types::{PyAnyMethods, PyModule, PyModuleMethods},
Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python,
Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
};
use ulid::Ulid;
@@ -37,7 +37,6 @@ use self::session::Session;
use crate::{
errors::{NotFoundError, SynapseError},
http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
UnwrapInfallible,
};
mod session;
@@ -126,11 +125,7 @@ impl RendezvousHandler {
let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
.map_err(|_| PyValueError::new_err("Invalid base URI"))?;
let clock = homeserver
.call_method0("get_clock")?
.into_pyobject(py)
.unwrap_infallible()
.unbind();
let clock = homeserver.call_method0("get_clock")?.to_object(py);
// Construct a Python object so that we can get a reference to the
// evict method and schedule it to run.
@@ -293,13 +288,6 @@ impl RendezvousHandler {
let mut response = Response::new(Bytes::new());
*response.status_mut() = StatusCode::ACCEPTED;
prepare_headers(response.headers_mut(), session);
// Even though this isn't mandated by the MSC, we set a Content-Type on the response. It
// doesn't do any harm as the body is empty, but this helps escape a bug in some reverse
// proxy/cache setup which strips the ETag header if there is no Content-Type set.
// Specifically, we noticed this behaviour when placing Synapse behind Cloudflare.
response.headers_mut().typed_insert(ContentType::text());
http_response_to_twisted(twisted_request, response)?;
Ok(())
@@ -323,7 +311,7 @@ impl RendezvousHandler {
}
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "rendezvous")?;
let child_module = PyModule::new_bound(py, "rendezvous")?;
child_module.add_class::<RendezvousHandler>()?;
@@ -331,7 +319,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import rendezvous` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.rendezvous", child_module)?;

View File

@@ -28,8 +28,9 @@ from typing import Collection, Optional, Sequence, Set
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (rolling distro, no EOL)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
"ubuntu:oracular", # 24.10 (EOL 2025-07)

View File

@@ -195,10 +195,6 @@ if [ -z "$skip_docker_build" ]; then
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"

View File

@@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 9):
print("Synapse requires Python 3.9 or above.")
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.

View File

@@ -88,7 +88,6 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.user_directory import (
@@ -256,7 +255,6 @@ class Store(
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
EventFederationWorkerStore,
SlidingSyncStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

View File

@@ -231,8 +231,6 @@ class EventContentFields:
ROOM_NAME: Final = "name"
MEMBERSHIP: Final = "membership"
MEMBERSHIP_DISPLAYNAME: Final = "displayname"
MEMBERSHIP_AVATAR_URL: Final = "avatar_url"
# Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access"

View File

@@ -87,7 +87,8 @@ class Codes(str, Enum):
WEAK_PASSWORD = "M_WEAK_PASSWORD"
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
USER_LOCKED = "M_USER_LOCKED"
# USER_LOCKED = "M_USER_LOCKED"
USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"
NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED"
CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA"
@@ -100,9 +101,8 @@ class Codes(str, Enum):
# The account has been suspended on the server.
# By opposition to `USER_DEACTIVATED`, this is a reversible measure
# that can possibly be appealed and reverted.
# Introduced by MSC3823
# https://github.com/matrix-org/matrix-spec-proposals/pull/3823
USER_ACCOUNT_SUSPENDED = "M_USER_SUSPENDED"
# Part of MSC3823.
USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
BAD_ALIAS = "M_BAD_ALIAS"
# For restricted join rules.

View File

@@ -23,8 +23,7 @@
import hmac
from hashlib import sha256
from typing import Optional
from urllib.parse import urlencode, urljoin
from urllib.parse import urlencode
from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig
@@ -67,42 +66,3 @@ class ConsentURIBuilder:
urlencode({"u": user_id, "h": mac}),
)
return consent_uri
class LoginSSORedirectURIBuilder:
def __init__(self, hs_config: HomeServerConfig):
self._public_baseurl = hs_config.server.public_baseurl
def build_login_sso_redirect_uri(
self, *, idp_id: Optional[str], client_redirect_url: str
) -> str:
"""Build a `/login/sso/redirect` URI for the given identity provider.
Builds `/_matrix/client/v3/login/sso/redirect/{idpId}?redirectUrl=xxx` when `idp_id` is specified.
Otherwise, builds `/_matrix/client/v3/login/sso/redirect?redirectUrl=xxx` when `idp_id` is `None`.
Args:
idp_id: Optional ID of the identity provider
client_redirect_url: URL to redirect the user to after login
        Returns:
The URI to follow when choosing a specific identity provider.
"""
base_url = urljoin(
self._public_baseurl,
f"{CLIENT_API_PREFIX}/v3/login/sso/redirect",
)
serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url})
if idp_id:
resultant_url = urljoin(
# We have to add a trailing slash to the base URL to ensure that the
# last path segment is not stripped away when joining with another path.
f"{base_url}/",
f"{idp_id}?{serialized_query_parameters}",
)
else:
resultant_url = f"{base_url}?{serialized_query_parameters}"
return resultant_url
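
The trailing-slash comment above reflects standard `urljoin` semantics rather
than anything Synapse-specific; a quick illustration with an assumed base URL:

from urllib.parse import urljoin

base = "https://matrix.example.com/_matrix/client/v3/login/sso/redirect"
# Without a trailing slash, the last path segment is replaced...
print(urljoin(base, "oidc"))        # .../client/v3/login/sso/oidc
# ...with one, the new segment is appended as intended.
print(urljoin(base + "/", "oidc"))  # .../client/v3/login/sso/redirect/oidc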

View File

@@ -87,7 +87,6 @@ class ApplicationService:
ip_range_whitelist: Optional[IPSet] = None,
supports_ephemeral: bool = False,
msc3202_transaction_extensions: bool = False,
msc4190_device_management: bool = False,
):
self.token = token
self.url = (
@@ -101,7 +100,6 @@ class ApplicationService:
self.ip_range_whitelist = ip_range_whitelist
self.supports_ephemeral = supports_ephemeral
self.msc3202_transaction_extensions = msc3202_transaction_extensions
self.msc4190_device_management = msc4190_device_management
if "|" in self.id:
raise Exception("application service ID cannot contain '|' character")

View File

@@ -183,18 +183,6 @@ def _load_appservice(
"The `org.matrix.msc3202` option should be true or false if specified."
)
# Opt-in flag for the MSC4190 behaviours.
# When enabled, the following C-S API endpoints change for appservices:
# - POST /register does not return an access token
# - PUT /devices/{device_id} creates a new device if one does not exist
# - DELETE /devices/{device_id} no longer requires UIA
# - POST /delete_devices/{device_id} no longer requires UIA
msc4190_enabled = as_info.get("io.element.msc4190", False)
if not isinstance(msc4190_enabled, bool):
raise ValueError(
"The `io.element.msc4190` option should be true or false if specified."
)
return ApplicationService(
token=as_info["as_token"],
url=as_info["url"],
@@ -207,5 +195,4 @@ def _load_appservice(
ip_range_whitelist=ip_range_whitelist,
supports_ephemeral=supports_ephemeral,
msc3202_transaction_extensions=msc3202_transaction_extensions,
msc4190_device_management=msc4190_enabled,
)

View File

@@ -20,7 +20,7 @@
#
#
from typing import Any, List, Optional
from typing import Any, List
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
@@ -46,9 +46,7 @@ class CasConfig(Config):
# TODO Update this to a _synapse URL.
public_baseurl = self.root.server.public_baseurl
self.cas_service_url: Optional[str] = (
public_baseurl + "_matrix/client/r0/login/cas/ticket"
)
self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
self.cas_protocol_version = cas_config.get("protocol_version")
if (

View File

@@ -365,6 +365,11 @@ class ExperimentalConfig(Config):
# MSC3874: Filtering /messages with rel_types / not_rel_types.
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
# MSC3886: Simple client rendezvous capability
self.msc3886_endpoint: Optional[str] = experimental.get(
"msc3886_endpoint", None
)
# MSC3890: Remotely silence local notifications
# Note: This option requires "experimental_features.msc3391_enabled" to be
# set to "true", in order to communicate account data deletions to clients.
@@ -436,14 +441,12 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
self.msc3823_account_suspension = experimental.get(
"msc3823_account_suspension", False
)
# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
# MSC4210: Remove legacy mentions
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
# MSC4222: Adding `state_after` to sync v2
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
# MSC4076: Add `disable_badge_count`` to pusher configuration
self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)

View File

@@ -360,6 +360,5 @@ def setup_logging(
"Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Public Base URL: %s", config.server.public_baseurl)
logging.info("Instance name: %s", hs.get_instance_name())
logging.info("Twisted reactor: %s", type(reactor).__name__)

View File

@@ -22,7 +22,7 @@
import logging
import os
from typing import Any, Dict, List, Tuple
from urllib.request import getproxies_environment
from urllib.request import getproxies_environment # type: ignore
import attr
@@ -272,7 +272,9 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)
self.enable_authenticated_media = config.get("enable_authenticated_media", True)
self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None

View File

@@ -215,6 +215,9 @@ class HttpListenerConfig:
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
# If true, the listener will return CORS response headers compatible with MSC3886:
# https://github.com/matrix-org/matrix-spec-proposals/pull/3886
experimental_cors_msc3886: bool = False
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -332,14 +335,8 @@ class ServerConfig(Config):
logger.info("Using default public_baseurl %s", public_baseurl)
else:
self.serve_client_wellknown = True
# Ensure that public_baseurl ends with a trailing slash
if public_baseurl[-1] != "/":
public_baseurl += "/"
# Scrutinize user-provided config
if not isinstance(public_baseurl, str):
raise ConfigError("Must be a string", ("public_baseurl",))
self.public_baseurl = public_baseurl
# check that public_baseurl is valid
@@ -1007,6 +1004,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
if socket_path:

View File

@@ -140,6 +140,7 @@ from typing import (
Iterable,
List,
Optional,
Set,
Tuple,
)
@@ -169,13 +170,7 @@ from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import (
JsonDict,
ReadReceipt,
RoomStreamToken,
StrCollection,
get_domain_from_id,
)
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
from synapse.util import Clock
from synapse.util.metrics import Measure
from synapse.util.retryutils import filter_destinations_by_retry_limiter
@@ -302,10 +297,12 @@ class _DestinationWakeupQueue:
# being woken up.
_MAX_TIME_IN_QUEUE = 30.0
# The maximum duration in seconds between waking up consecutive destination
# queues.
_MAX_DELAY = 0.1
sender: "FederationSender" = attr.ib()
clock: Clock = attr.ib()
max_delay_s: int = attr.ib()
queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
processing: bool = attr.ib(default=False)
@@ -335,7 +332,7 @@ class _DestinationWakeupQueue:
# We also add an upper bound to the delay, to gracefully handle the
# case where the queue only has a few entries in it.
current_sleep_seconds = min(
self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue)
self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
)
while self.queue:
@@ -419,15 +416,20 @@ class FederationSender(AbstractFederationSender):
self._is_processing = False
self._last_poked_id = -1
self._external_cache = hs.get_external_cache()
# map from room_id to a set of PerDestinationQueues which we believe are
# awaiting a call to flush_read_receipts_for_room. The presence of an entry
# here for a given room means that we are rate-limiting RR flushes to that room,
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
rr_txn_interval_per_room_s = (
1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._destination_wakeup_queue = _DestinationWakeupQueue(
self, self.clock, max_delay_s=rr_txn_interval_per_room_s
self._rr_txn_interval_per_room_ms = (
1000.0
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._external_cache = hs.get_external_cache()
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call_now(
run_as_background_process,
@@ -743,48 +745,37 @@ class FederationSender(AbstractFederationSender):
# Some background on the rate-limiting going on here.
#
# It turns out that if we attempt to send out RRs as soon as we get them
# from a client, then we end up trying to do several hundred Hz of
# federation transactions. (The number of transactions scales as O(N^2)
# on the size of a room, since in a large room we have both more RRs
# coming in, and more servers to send them to.)
# It turns out that if we attempt to send out RRs as soon as we get them from
# a client, then we end up trying to do several hundred Hz of federation
# transactions. (The number of transactions scales as O(N^2) on the size of a
# room, since in a large room we have both more RRs coming in, and more servers
# to send them to.)
#
# This leads to a lot of CPU load, and we end up getting behind. The
# solution currently adopted is to differentiate between receipts and
# destinations we should immediately send to, and those we can trickle
# the receipts to.
# This leads to a lot of CPU load, and we end up getting behind. The solution
# currently adopted is as follows:
#
# The current logic is to send receipts out immediately if:
# - the room is "small", i.e. there's only N servers to send receipts
# to, and so sending out the receipts immediately doesn't cause too
# much load; or
# - the receipt is for an event that happened recently, as users
# notice if receipts are delayed when they know other users are
# currently reading the room; or
# - the receipt is being sent to the server that sent the event, so
# that users see receipts for their own receipts quickly.
# The first receipt in a given room is sent out immediately, at time T0. Any
# further receipts are, in theory, batched up for N seconds, where N is calculated
# based on the number of servers in the room to achieve a transaction frequency
# of around 50Hz. So, for example, if there were 100 servers in the room, then
# N would be 100 / 50Hz = 2 seconds.
#
# For destinations that we should delay sending the receipt to, we queue
# the receipts up to be sent in the next transaction, but don't trigger
# a new transaction to be sent. We then add the destination to the
# `DestinationWakeupQueue`, which will slowly iterate over each
# destination and trigger a new transaction to be sent.
# Then, after T+N, we flush out any receipts that have accumulated, and restart
# the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
# we stop the cycle and go back to the start.
#
# However, in practice, it is often possible to send out delayed
# receipts earlier: in particular, if we are sending a transaction to a
# given server anyway (for example, because we have a PDU or a RR in
# another room to send), then we may as well send out all of the pending
# RRs for that server. So it may be that by the time we get to waking up
# the destination, we don't actually have any RRs left to send out.
# However, in practice, it is often possible to flush out receipts earlier: in
# particular, if we are sending a transaction to a given server anyway (for
# example, because we have a PDU or a RR in another room to send), then we may
# as well send out all of the pending RRs for that server. So it may be that
# by the time we get to T+N, we don't actually have any RRs left to send out.
# Nevertheless we continue to buffer up RRs for the room in question until we
# reach the point that no RRs arrive between timer ticks.
#
# For even more background, see
# https://github.com/matrix-org/synapse/issues/4730.
# For even more background, see https://github.com/matrix-org/synapse/issues/4730.
room_id = receipt.room_id
# Local read receipts always have 1 event ID.
event_id = receipt.event_ids[0]
# Work out which remote servers should be poked and poke them.
domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
@@ -806,51 +797,49 @@ class FederationSender(AbstractFederationSender):
if not domains:
return
# We now split which domains we want to wake up immediately vs which we
# want to delay waking up.
immediate_domains: StrCollection
delay_domains: StrCollection
queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
if len(domains) < 10:
# For "small" rooms send to all domains immediately
immediate_domains = domains
delay_domains = ()
# if there is no flush yet scheduled, we will send out these receipts with
# immediate flushes, and schedule the next flush for this room.
if queues_pending_flush is not None:
logger.debug("Queuing receipt for: %r", domains)
else:
metadata = await self.store.get_metadata_for_event(
receipt.room_id, event_id
)
assert metadata is not None
logger.debug("Sending receipt to: %r", domains)
self._schedule_rr_flush_for_room(room_id, len(domains))
sender_domain = get_domain_from_id(metadata.sender)
for domain in domains:
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
if self.clock.time_msec() - metadata.received_ts < 60_000:
# We always send receipts for recent messages immediately
immediate_domains = domains
delay_domains = ()
# if there is already a RR flush pending for this room, then make sure this
# destination is registered for the flush
if queues_pending_flush is not None:
queues_pending_flush.add(queue)
else:
# Otherwise, we delay waking up all destinations except for the
# sender's domain.
immediate_domains = []
delay_domains = []
for domain in domains:
if domain == sender_domain:
immediate_domains.append(domain)
else:
delay_domains.append(domain)
queue.flush_read_receipts_for_room(room_id)
for domain in immediate_domains:
# Add to destination queue and wake the destination up
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
queue.attempt_new_transaction()
def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
# that is going to cause approximately len(domains) transactions, so now back
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
for domain in delay_domains:
# Add to destination queue...
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
self._queues_awaiting_rr_flush_by_room[room_id] = set()
# ... and schedule the destination to be woken up.
self._destination_wakeup_queue.add_to_queue(domain)
def _flush_rrs_for_room(self, room_id: str) -> None:
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)
if not queues:
# no more RRs arrived for this room; we are done.
return
# schedule the next flush
self._schedule_rr_flush_for_room(room_id, len(queues))
for queue in queues:
queue.flush_read_receipts_for_room(room_id)
async def send_presence_to_destinations(
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
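
A rough sketch, as a standalone function rather than the real class, of the
sleep that the `max_delay_s` variant of `_DestinationWakeupQueue` computes in
this hunk: the delay is capped by the per-room receipt rate limit and bounded
so a destination never waits longer than the queue's maximum dwell time. The
helper name is invented, and the rate of 50 txn/s is assumed to be the config
default:

MAX_TIME_IN_QUEUE = 30.0  # mirrors _DestinationWakeupQueue._MAX_TIME_IN_QUEUE

def wakeup_sleep_seconds(queue_len: int, rr_txns_per_room_per_second: float) -> float:
    # max_delay_s mirrors 1.0 / federation_rr_transactions_per_room_per_second
    max_delay_s = 1.0 / rr_txns_per_room_per_second
    return min(max_delay_s, MAX_TIME_IN_QUEUE / queue_len)

print(wakeup_sleep_seconds(10, 50))    # 0.02: short queue, rate limit wins
print(wakeup_sleep_seconds(3000, 50))  # 0.01: long queue drains faster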

View File

@@ -156,6 +156,7 @@ class PerDestinationQueue:
# Each receipt can only have a single receipt per
# (room ID, receipt type, user ID, thread ID) tuple.
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
# NB: may be a long or an int.
@@ -257,7 +258,15 @@ class PerDestinationQueue:
}
)
self.mark_new_data()
def flush_read_receipts_for_room(self, room_id: str) -> None:
# If there are any pending receipts for this room then force-flush them
# in a new transaction.
for edu in self._pending_receipt_edus:
if room_id in edu:
self._rrs_pending_flush = True
self.attempt_new_transaction()
# No use in checking remaining EDUs if the room was found.
break
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu
@@ -594,9 +603,12 @@ class PerDestinationQueue:
self._destination, last_successful_stream_ordering
)
def _get_receipt_edus(self, limit: int) -> Iterable[Edu]:
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
if not self._pending_receipt_edus:
return
if not force_flush and not self._rrs_pending_flush:
# not yet time for this lot
return
# Send at most limit EDUs for receipts.
for content in self._pending_receipt_edus[:limit]:
@@ -735,7 +747,7 @@ class _TransactionQueueManager:
)
# Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(limit=5))
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
# Next, prioritize to-device messages so that existing encryption channels
@@ -783,6 +795,13 @@ class _TransactionQueueManager:
if not self._pdus and not pending_edus:
return [], []
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if edu_limit:
pending_edus.extend(
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
)
if self._pdus:
self._last_stream_ordering = self._pdus[
-1

View File

@@ -509,9 +509,6 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
event = content["event"]
invite_room_state = content.get("invite_room_state", [])
if not isinstance(invite_room_state, list):
invite_room_state = []
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API

View File

@@ -124,7 +124,6 @@ class AdminHandler:
"consent_ts": user_info.consent_ts,
"user_type": user_info.user_type,
"is_guest": user_info.is_guest,
"suspended": user_info.suspended,
}
if self._msc3866_enabled:

View File

@@ -896,10 +896,10 @@ class ApplicationServicesHandler:
results = await make_deferred_yieldable(
defer.DeferredList(
[
run_in_background( # type: ignore[call-overload]
run_in_background(
self.appservice_api.claim_client_keys,
# We know this must be an app service.
self.store.get_app_service_by_id(service_id),
self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
service_query,
)
for service_id, service_query in query_by_appservice.items()
@@ -952,10 +952,10 @@ class ApplicationServicesHandler:
results = await make_deferred_yieldable(
defer.DeferredList(
[
run_in_background( # type: ignore[call-overload]
run_in_background(
self.appservice_api.query_keys,
# We know this must be an app service.
self.store.get_app_service_by_id(service_id),
self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
service_query,
)
for service_id, service_query in query_by_appservice.items()

View File

@@ -729,40 +729,6 @@ class DeviceHandler(DeviceWorkerHandler):
await self.notify_device_update(user_id, device_ids)
async def upsert_device(
self, user_id: str, device_id: str, display_name: Optional[str] = None
) -> bool:
"""Create or update a device
Args:
user_id: The user to update devices of.
device_id: The device to update.
display_name: The new display name for this device.
Returns:
True if the device was created, False if it was updated.
"""
# Reject a new displayname which is too long.
self._check_device_name_length(display_name)
created = await self.store.store_device(
user_id,
device_id,
initial_device_display_name=display_name,
)
if not created:
await self.store.update_device(
user_id,
device_id,
new_display_name=display_name,
)
await self.notify_device_update(user_id, [device_id])
return created
async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
"""Update the given device

View File

@@ -39,8 +39,6 @@ from synapse.replication.http.devices import ReplicationUploadKeysForUserRestSer
from synapse.types import (
JsonDict,
JsonMapping,
ScheduledTask,
TaskStatus,
UserID,
get_domain_from_id,
get_verify_key_from_cross_signing_key,
@@ -72,7 +70,6 @@ class E2eKeysHandler:
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
self._worker_lock_handler = hs.get_worker_locks_handler()
self._task_scheduler = hs.get_task_scheduler()
federation_registry = hs.get_federation_registry()
@@ -119,10 +116,6 @@ class E2eKeysHandler:
hs.config.experimental.msc3984_appservice_key_query
)
self._task_scheduler.register_action(
self._delete_old_one_time_keys_task, "delete_old_otks"
)
@trace
@cancellable
async def query_devices(
@@ -622,7 +615,7 @@ class E2eKeysHandler:
3. Attempt to fetch fallback keys from the database.
Args:
local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
local_query: An iterable of tuples of (user ID, device ID, algorithm).
always_include_fallback_keys: True to always include fallback keys.
Returns:
@@ -1581,45 +1574,6 @@ class E2eKeysHandler:
return True
return False
async def _delete_old_one_time_keys_task(
self, task: ScheduledTask
) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
"""Scheduler task to delete old one time keys.
Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
that it could still have old OTKs that the client has dropped. This task is scheduled exactly once
by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm.
"""
last_user = task.result.get("from_user", "") if task.result else ""
while True:
# We process users in batches of 100
users, rowcount = await self.store.delete_old_otks_for_next_user_batch(
last_user, 100
)
if len(users) == 0:
# We're done!
return TaskStatus.COMPLETE, None, None
logger.debug(
"Deleted %i old one-time-keys for users '%s'..'%s'",
rowcount,
users[0],
users[-1],
)
last_user = users[-1]
# Store our progress
await self._task_scheduler.update_task(
task.id, result={"from_user": last_user}
)
# Sleep a little before doing the next user.
#
# matrix.org has about 15M users in the e2e_one_time_keys_json table
# (comprising 20M devices). We want this to take about a week, so we need
# to do about one batch of 100 users every 4 seconds.
await self.clock.sleep(4)
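
The scheduling arithmetic in the comment above checks out; a back-of-the-envelope
confirmation:

batches = 15_000_000 / 100  # ~150,000 batches of 100 users each
seconds = batches * 4       # one batch every 4 seconds
print(seconds / 86_400)     # ~6.9 days, i.e. roughly a week
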
def _check_cross_signing_key(
key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None

View File

@@ -880,9 +880,6 @@ class FederationHandler:
if stripped_room_state is None:
raise KeyError("Missing 'knock_room_state' field in send_knock response")
if not isinstance(stripped_room_state, list):
raise TypeError("'knock_room_state' has wrong type")
event.unsigned["knock_room_state"] = stripped_room_state
context = EventContext.for_outlier(self._storage_controllers)

View File

@@ -196,9 +196,7 @@ class MessageHandler:
AuthError (403) if the user doesn't have permission to view
members of this room.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_filter = state_filter or StateFilter.all()
user_id = requester.user.to_string()
if at_token:

View File

@@ -630,9 +630,7 @@ class RegistrationHandler:
"""
await self._auto_join_rooms(user_id)
async def appservice_register(
self, user_localpart: str, as_token: str
) -> Tuple[str, ApplicationService]:
async def appservice_register(self, user_localpart: str, as_token: str) -> str:
user = UserID(user_localpart, self.hs.hostname)
user_id = user.to_string()
service = self.store.get_app_service_by_token(as_token)
@@ -655,7 +653,7 @@ class RegistrationHandler:
appservice_id=service_id,
create_profile_with_displayname=user.localpart,
)
return (user_id, service)
return user_id
def check_user_id_not_appservice_exclusive(
self, user_id: str, allowed_appservice: Optional[ApplicationService] = None

View File

@@ -12,7 +12,6 @@
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import itertools
import logging
from itertools import chain
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple
@@ -39,7 +38,6 @@ from synapse.logging.opentracing import (
trace,
)
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.databases.main.stream import PaginateFunction
from synapse.storage.roommember import (
MemberSummary,
@@ -49,7 +47,6 @@ from synapse.types import (
MutableStateMap,
PersistedEventPosition,
Requester,
RoomStreamToken,
SlidingSyncStreamToken,
StateMap,
StrCollection,
@@ -82,15 +79,6 @@ sync_processing_time = Histogram(
["initial"],
)
# Limit the number of state_keys we should remember sending down the connection for each
# (room_id, user_id). We don't want to store and pull out too much data in the database.
#
# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down
# too many redundant member state events (that the client already knows about) for a
# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members
# anyway and it takes a while to cycle through 100 members.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100
class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
@@ -472,64 +460,6 @@ class SlidingSyncHandler:
return state_map
@trace
async def get_current_state_deltas_for_room(
self,
room_id: str,
room_membership_for_user_at_to_token: RoomsForUserType,
from_token: RoomStreamToken,
to_token: RoomStreamToken,
) -> List[StateDelta]:
"""
Get the state deltas between two tokens taking into account the user's
membership. If the user is LEAVE/BAN, we will only get the state deltas up to
their LEAVE/BAN event (inclusive).
(> `from_token` and <= `to_token`)
"""
membership = room_membership_for_user_at_to_token.membership
# We don't know how to handle `membership` values other than these. The
# code below would need to be updated.
assert membership in (
Membership.JOIN,
Membership.INVITE,
Membership.KNOCK,
Membership.LEAVE,
Membership.BAN,
)
# People shouldn't see past their leave/ban event
if membership in (
Membership.LEAVE,
Membership.BAN,
):
to_bound = (
room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
)
# If we are participating in the room, we can get the latest current state in
# the room
elif membership == Membership.JOIN:
to_bound = to_token
# We can only rely on the stripped state included in the invite/knock event
# itself so there will never be any state deltas to send down.
elif membership in (Membership.INVITE, Membership.KNOCK):
return []
else:
# We don't know how to handle this type of membership yet
#
# FIXME: We should use `assert_never` here but for some reason
# the exhaustive matching doesn't recognize the `Never` here.
# assert_never(membership)
raise AssertionError(
f"Unexpected membership {membership} that we don't know how to handle yet"
)
return await self.store.get_current_state_deltas_for_room(
room_id=room_id,
from_token=from_token,
to_token=to_bound,
)
@trace
async def get_room_sync_data(
self,
@@ -815,19 +745,13 @@ class SlidingSyncHandler:
stripped_state = []
if invite_or_knock_event.membership == Membership.INVITE:
invite_state = invite_or_knock_event.unsigned.get(
"invite_room_state", []
stripped_state.extend(
invite_or_knock_event.unsigned.get("invite_room_state", [])
)
if not isinstance(invite_state, list):
invite_state = []
stripped_state.extend(invite_state)
elif invite_or_knock_event.membership == Membership.KNOCK:
knock_state = invite_or_knock_event.unsigned.get("knock_room_state", [])
if not isinstance(knock_state, list):
knock_state = []
stripped_state.extend(knock_state)
stripped_state.extend(
invite_or_knock_event.unsigned.get("knock_room_state", [])
)
stripped_state.append(strip_event(invite_or_knock_event))
@@ -856,9 +780,8 @@ class SlidingSyncHandler:
# TODO: Limit the number of state events we're about to send down
# the room, if its too many we should change this to an
# `initial=True`?
deltas = await self.get_current_state_deltas_for_room(
deltas = await self.store.get_current_state_deltas_for_room(
room_id=room_id,
room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
from_token=from_bound,
to_token=to_token.room_key,
)
@@ -950,14 +873,6 @@ class SlidingSyncHandler:
#
# Calculate the `StateFilter` based on the `required_state` for the room
required_state_filter = StateFilter.none()
# The requested `required_state_map` with the lazy membership expanded and
# `$ME` replaced with the user's ID. This allows us to see what membership we've
# sent down to the client in the next request.
#
# Make a copy so we can modify it. Still need to be careful to make a copy of
# the state key sets if we want to add/remove from them. We could make a deep
# copy but this saves us some work.
expanded_required_state_map = dict(room_sync_config.required_state_map)
if room_membership_for_user_at_to_token.membership not in (
Membership.INVITE,
Membership.KNOCK,
@@ -1022,55 +937,22 @@ class SlidingSyncHandler:
and state_key == StateValues.LAZY
):
lazy_load_room_members = True
# Everyone in the timeline is relevant
timeline_membership: Set[str] = set()
if timeline_events is not None:
for timeline_event in timeline_events:
# Anyone who sent a message is relevant
timeline_membership.add(timeline_event.sender)
# We also care about invite, ban, kick, targets,
# etc.
if timeline_event.type == EventTypes.Member:
timeline_membership.add(
timeline_event.state_key
)
# Update the required state filter so we pick up the new
# membership
for user_id in timeline_membership:
required_state_types.append(
(EventTypes.Member, user_id)
)
# Add an explicit entry for each user in the timeline
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| timeline_membership
)
# FIXME: We probably also care about invite, ban, kick, targets, etc
# but the spec only mentions "senders".
elif state_key == StateValues.ME:
num_others += 1
required_state_types.append((state_type, user.to_string()))
# Replace `$ME` with the user's ID so we can deduplicate
# when someone requests the same state with `$ME` or with
# their user ID.
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| {user.to_string()}
)
else:
num_others += 1
required_state_types.append((state_type, state_key))
@@ -1134,8 +1016,8 @@ class SlidingSyncHandler:
changed_required_state_map, added_state_filter = (
_required_state_changes(
user.to_string(),
prev_required_state_map=prev_room_sync_config.required_state_map,
request_required_state_map=expanded_required_state_map,
previous_room_config=prev_room_sync_config,
room_sync_config=room_sync_config,
state_deltas=room_state_delta_id_map,
)
)
@@ -1249,9 +1131,7 @@ class SlidingSyncHandler:
# sensible order again.
bump_stamp = 0
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
expanded_required_state_map
)
room_sync_required_state_map_to_persist = room_sync_config.required_state_map
if changed_required_state_map:
room_sync_required_state_map_to_persist = changed_required_state_map
@@ -1305,10 +1185,7 @@ class SlidingSyncHandler:
)
else:
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_required_state_map_to_persist,
)
new_connection_state.room_configs[room_id] = room_sync_config
set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
@@ -1443,8 +1320,8 @@ class SlidingSyncHandler:
def _required_state_changes(
user_id: str,
*,
prev_required_state_map: Mapping[str, AbstractSet[str]],
request_required_state_map: Mapping[str, AbstractSet[str]],
previous_room_config: "RoomSyncConfig",
room_sync_config: RoomSyncConfig,
state_deltas: StateMap[str],
) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
"""Calculates the changes between the required state room config from the
@@ -1465,6 +1342,10 @@ def _required_state_changes(
and the state filter to use to fetch extra current state that we need to
return.
"""
prev_required_state_map = previous_room_config.required_state_map
request_required_state_map = room_sync_config.required_state_map
if prev_required_state_map == request_required_state_map:
# There has been no change. Return immediately.
return None, StateFilter.none()
@@ -1497,19 +1378,12 @@ def _required_state_changes(
# client. Passed to `StateFilter.from_types(...)`
added: List[Tuple[str, Optional[str]]] = []
# Convert the list of state deltas to map from type to state_keys that have
# changed.
changed_types_to_state_keys: Dict[str, Set[str]] = {}
for event_type, state_key in state_deltas:
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
# First we calculate what, if anything, has been *added*.
for event_type in (
prev_required_state_map.keys() | request_required_state_map.keys()
):
old_state_keys = prev_required_state_map.get(event_type, set())
request_state_keys = request_required_state_map.get(event_type, set())
changed_state_keys = changed_types_to_state_keys.get(event_type, set())
if old_state_keys == request_state_keys:
# No change to this type
@@ -1519,55 +1393,8 @@ def _required_state_changes(
# Nothing *added*, so we skip. Removals happen below.
continue
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# Figure out which state keys we should remember sending down the connection
inheritable_previous_state_keys = (
# Retain the previous state_keys that we've sent down before.
# Wildcard and lazy state keys are not sticky from previous requests.
(old_state_keys - {StateValues.WILDCARD, StateValues.LAZY})
- invalidated_state_keys
)
# Always update changes to include the newly added keys (we've expanded the set
# of state keys), use the new requested set with whatever hasn't been
# invalidated from the previous set.
changes[event_type] = request_state_keys | inheritable_previous_state_keys
# Limit the number of state_keys we should remember sending down the connection
# for each (room_id, user_id). We don't want to store and pull out too much data
# in the database. This is a happy-medium between remembering nothing and
# everything. We can avoid sending redundant state down the connection most of
# the time given that most rooms don't have 100 members anyway and it takes a
# while to cycle through 100 members.
#
# Only remember up to (MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER)
if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Reset back to only the requested state keys
changes[event_type] = request_state_keys
# Skip if there isn't any room to fill in the rest with previous state keys
if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Fill the rest with previous state_keys. Ideally, we could sort
# these by recency but it's just a set so just pick an arbitrary
# subset (good enough).
changes[event_type] = changes[event_type] | set(
itertools.islice(
inheritable_previous_state_keys,
# Just taking the difference isn't perfect as there could be
# overlap in the keys between the requested and previous but we
# will decide to just take the easy route for now and avoid
# additional set operations to figure it out.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
- len(request_state_keys),
)
)
# Always update changes to include the newly added keys
changes[event_type] = request_state_keys
if StateValues.WILDCARD in old_state_keys:
# We were previously fetching everything for this type, so we don't need to
@@ -1594,6 +1421,12 @@ def _required_state_changes(
added_state_filter = StateFilter.from_types(added)
# Convert the list of state deltas to map from type to state_keys that have
# changed.
changed_types_to_state_keys: Dict[str, Set[str]] = {}
for event_type, state_key in state_deltas:
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
# Figure out what changes we need to apply to the effective required state
# config.
for event_type, changed_state_keys in changed_types_to_state_keys.items():
@@ -1604,23 +1437,15 @@ def _required_state_changes(
# No change.
continue
# If we see the `user_id` as a state_key, also add "$ME" to the list of state
# that has changed to account for people requesting `required_state` with `$ME`
# or their user ID.
if user_id in changed_state_keys:
changed_state_keys.add(StateValues.ME)
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# We've expanded the set of state keys, ... (already handled above)
if request_state_keys - old_state_keys:
# We've expanded the set of state keys, so we just clobber the
# current set with the new set.
#
# We could also ensure that we keep entries where the state hasn't
# changed, but are no longer in the requested required state, but
# that's a sufficient edge case that we can ignore (as its only a
# performance optimization).
changes[event_type] = request_state_keys
continue
old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
@@ -1642,6 +1467,11 @@ def _required_state_changes(
changes[event_type] = request_state_keys
continue
# Handle "$ME" values by adding "$ME" if the state key matches the user
# ID.
if user_id in changed_state_keys:
changed_state_keys.add(StateValues.ME)
# At this point there are no wildcards and no additions to the set of
# state keys requested, only deletions.
#
@@ -1650,8 +1480,9 @@ def _required_state_changes(
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
if invalidated_state_keys:
changes[event_type] = old_state_keys - invalidated_state_keys
invalidated = (old_state_keys - request_state_keys) & changed_state_keys
if invalidated:
changes[event_type] = old_state_keys - invalidated
if changes:
# Update the required state config based on the changes.
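
A toy sketch of the "remember a bounded number of previously-sent state keys"
merge in this hunk; the helper name is invented, and the cap mirrors
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER. Requested keys always win, and any
leftover capacity is backfilled with keys sent previously:

import itertools

MAX_REMEMBERED = 100

def merge_state_keys(request_keys: set, inheritable_previous: set) -> set:
    merged = request_keys | inheritable_previous
    if len(merged) <= MAX_REMEMBERED:
        return merged
    if len(request_keys) >= MAX_REMEMBERED:
        # No room left: fall back to exactly what was requested.
        return set(request_keys)
    # Fill the remaining slots with an arbitrary subset of previous keys.
    backfill = itertools.islice(
        inheritable_previous, MAX_REMEMBERED - len(request_keys)
    )
    return set(request_keys) | set(backfill)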

View File

@@ -143,7 +143,6 @@ class SyncConfig:
filter_collection: FilterCollection
is_guest: bool
device_id: Optional[str]
use_state_after: bool
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -1142,7 +1141,6 @@ class SyncHandler:
since_token: Optional[StreamToken],
end_token: StreamToken,
full_state: bool,
joined: bool,
) -> MutableStateMap[EventBase]:
"""Works out the difference in state between the end of the previous sync and
the start of the timeline.
@@ -1157,7 +1155,6 @@ class SyncHandler:
the point just after their leave event.
full_state: Whether to force returning the full state.
`lazy_load_members` still applies when `full_state` is `True`.
joined: whether the user is currently joined to the room
Returns:
The state to return in the sync response for the room.
@@ -1233,12 +1230,11 @@ class SyncHandler:
if full_state:
state_ids = await self._compute_state_delta_for_full_sync(
room_id,
sync_config,
sync_config.user,
batch,
end_token,
members_to_fetch,
timeline_state,
joined,
)
else:
# If this is an initial sync then full_state should be set, and
@@ -1248,7 +1244,6 @@ class SyncHandler:
state_ids = await self._compute_state_delta_for_incremental_sync(
room_id,
sync_config,
batch,
since_token,
end_token,
@@ -1321,24 +1316,20 @@ class SyncHandler:
async def _compute_state_delta_for_full_sync(
self,
room_id: str,
sync_config: SyncConfig,
syncing_user: UserID,
batch: TimelineBatch,
end_token: StreamToken,
members_to_fetch: Optional[Set[str]],
timeline_state: StateMap[str],
joined: bool,
) -> StateMap[str]:
"""Calculate the state events to be included in a full sync response.
As with `_compute_state_delta_for_incremental_sync`, the result will include
the membership events for the senders of each event in `members_to_fetch`.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
sync_confg: The user that is calling `/sync`.
syncing_user: The user that is calling `/sync`.
batch: The timeline batch for the room that will be sent to the user.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
@@ -1347,11 +1338,10 @@ class SyncHandler:
events in the timeline.
timeline_state: The contribution to the room state from state events in
`batch`. Only contains the last event for any given state key.
joined: whether the user is currently joined to the room
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` or `state_after` part of the sync response.
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading of membership events is enabled.
@@ -1369,7 +1359,7 @@ class SyncHandler:
# is no guarantee that our membership will be in the auth events of
# timeline events when the room is partial stated.
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch.union((sync_config.user.to_string(),))
members_to_fetch.union((syncing_user.to_string(),))
)
# We are happy to use partial state to compute the `/sync` response.
@@ -1383,61 +1373,6 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check whether we want to return the state at the start or the end of the
# timeline. If at the end, we can just use the current state.
if sync_config.use_state_after:
# If we're getting the state at the end of the timeline, we can just
# use the current state of the room (and roll back any changes
# between when we fetched the current state and `end_token`).
#
# For rooms we're not joined to, there might be a very large number
# of deltas between `end_token` and "now", and so instead we fetch
# the state at the end of the timeline.
if joined:
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
# Now roll back the state by looking at the state deltas between
# end_token and now.
deltas = await self.store.get_current_state_deltas_for_room(
room_id,
from_token=end_token.room_key,
to_token=self.store.get_room_max_token(),
)
if deltas:
mutable_state_ids = dict(state_ids)
# We iterate over the deltas backwards so that if there are
# multiple changes of the same type/state_key we'll
# correctly pick the earliest delta.
for delta in reversed(deltas):
if delta.prev_event_id:
mutable_state_ids[(delta.event_type, delta.state_key)] = (
delta.prev_event_id
)
elif (delta.event_type, delta.state_key) in mutable_state_ids:
mutable_state_ids.pop((delta.event_type, delta.state_key))
state_ids = mutable_state_ids
return state_ids
else:
# Just use state groups to get the state at the end of the
# timeline, i.e. the state at the leave/etc event.
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
return state_at_timeline_end
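The removed `joined` branch above fetches the room's current state and then rewinds it to `end_token` by replaying state deltas newest-to-oldest. A self-contained sketch of that rollback, using plain dicts and a hypothetical `Delta` record in place of Synapse's storage types:

from typing import Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event_type, state_key) -> event_id

class Delta:
    def __init__(self, event_type: str, state_key: str,
                 event_id: Optional[str], prev_event_id: Optional[str]):
        self.event_type = event_type
        self.state_key = state_key
        self.event_id = event_id            # state after this delta
        self.prev_event_id = prev_event_id  # state before this delta

def rollback(current_state: StateMap, deltas: List[Delta]) -> StateMap:
    """Undo `deltas` (ordered oldest -> newest) to recover earlier state."""
    state = dict(current_state)
    # Iterate backwards so that, with several deltas for one
    # (type, state_key), the earliest prev_event_id wins -- i.e. the value
    # as of the token we are rolling back to.
    for delta in reversed(deltas):
        key = (delta.event_type, delta.state_key)
        if delta.prev_event_id:
            state[key] = delta.prev_event_id
        else:
            state.pop(key, None)  # entry did not exist before the delta
    return state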
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
@@ -1470,7 +1405,6 @@ class SyncHandler:
async def _compute_state_delta_for_incremental_sync(
self,
room_id: str,
sync_config: SyncConfig,
batch: TimelineBatch,
since_token: StreamToken,
end_token: StreamToken,
@@ -1485,12 +1419,8 @@ class SyncHandler:
(`compute_state_delta`) is responsible for keeping track of which membership
events we have already sent to the client, and hence ripping them out.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
sync_config
batch: The timeline batch for the room that will be sent to the user.
since_token: Token of the end of the previous batch.
end_token: Token of the end of the current batch. Normally this will be
@@ -1503,7 +1433,7 @@ class SyncHandler:
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` or `state_after` part of the sync response.
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading is enabled. Only return the state that is needed.
@@ -1515,51 +1445,6 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check whether we want to return the state at the start or the end of the
# timeline. If at the end, we can just use the current state delta stream.
if sync_config.use_state_after:
delta_state_ids: MutableStateMap[str] = {}
if members_to_fetch:
# We're lazy-loading, so the client might need some more member
# events to understand the events in this timeline. So we always
# fish out all the member events corresponding to the timeline
# here. The caller will then dedupe any redundant ones.
member_ids = await self._state_storage_controller.get_current_state_ids(
room_id=room_id,
state_filter=StateFilter.from_types(
(EventTypes.Member, member) for member in members_to_fetch
),
await_full_state=await_full_state,
)
delta_state_ids.update(member_ids)
# We don't do LL filtering for incremental syncs - see
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
# N.B. this slows down incr syncs as we are now processing way more
# state in the server than if we were LLing.
#
# i.e. we return all state deltas, including membership changes that
# we'd normally exclude due to LL.
deltas = await self.store.get_current_state_deltas_for_room(
room_id=room_id,
from_token=since_token.room_key,
to_token=end_token.room_key,
)
for delta in deltas:
if delta.event_id is None:
# There was a state reset and this state entry is no longer
# present, but we have no way of informing the client about
# this, so we just skip it for now.
continue
# Note that deltas are in stream ordering, so if there are
# multiple deltas for a given type/state_key we'll always pick
# the latest one.
delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
return delta_state_ids
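The incremental-sync counterpart above is the forward pass of the same idea: deltas arrive in stream order, so the newest delta for each (type, state_key) wins, and entries wiped by a state reset (no event_id) are skipped. A condensed sketch, reusing the hypothetical Delta record and StateMap alias from the rollback sketch earlier:

def apply_forward(deltas: List[Delta]) -> StateMap:
    delta_state_ids: StateMap = {}
    for delta in deltas:  # stream order: oldest -> newest
        if delta.event_id is None:
            # State reset: the entry vanished and there is no way to tell
            # the client about it, so skip it.
            continue
        # Later deltas for the same key overwrite earlier ones.
        delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
    return delta_state_ids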
# For a non-gappy sync if the events in the timeline are simply a linear
# chain (i.e. no merging/branching of the graph), then we know the state
# delta between the end of the previous sync and start of the new one is
@@ -2982,7 +2867,6 @@ class SyncHandler:
since_token,
room_builder.end_token,
full_state=full_state,
joined=room_builder.rtype == "joined",
)
else:
# An out of band room won't have any state changes.

View File

@@ -36,6 +36,7 @@ from typing import (
)
import attr
import multipart
import treq
from canonicaljson import encode_canonical_json
from netaddr import AddrFormatError, IPAddress, IPSet
@@ -92,20 +93,6 @@ from synapse.util.async_helpers import timeout_deferred
if TYPE_CHECKING:
from synapse.server import HomeServer
# Support both import names for the `python-multipart` (PyPI) library,
# which renamed its package name from `multipart` to `python_multipart`
# in 0.0.13 (though supports the old import name for compatibility).
# Note that the `multipart` import name collides with the separate `multipart`
# (PyPI) library, so we should prefer importing from `python_multipart` when possible.
try:
from python_multipart import MultipartParser
if TYPE_CHECKING:
from python_multipart import multipart
except ImportError:
from multipart import MultipartParser # type: ignore[no-redef]
logger = logging.getLogger(__name__)
outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
@@ -1052,7 +1039,7 @@ class _MultipartParserProtocol(protocol.Protocol):
self.deferred = deferred
self.boundary = boundary
self.max_length = max_length
self.parser: Optional[MultipartParser] = None
self.parser: Optional[multipart.MultipartParser] = None
self.multipart_response = MultipartResponse()
self.has_redirect = False
self.in_json = False
@@ -1110,12 +1097,12 @@ class _MultipartParserProtocol(protocol.Protocol):
self.deferred.errback()
self.file_length += end - start
callbacks: "multipart.MultipartCallbacks" = {
callbacks: "multipart.multipart.MultipartCallbacks" = {
"on_header_field": on_header_field,
"on_header_value": on_header_value,
"on_part_data": on_part_data,
}
self.parser = MultipartParser(self.boundary, callbacks)
self.parser = multipart.MultipartParser(self.boundary, callbacks)
self.total_length += len(incoming_data)
if self.max_length is not None and self.total_length >= self.max_length:
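For context on the callback-driven API the protocol above feeds: a minimal, standalone use of `MultipartParser` (the boundary and payload here are invented; data callbacks receive the whole buffer plus [start, end) offsets, so a streaming consumer can slice without copying the full body):

from multipart import MultipartParser  # the `python-multipart` PyPI library

body = b"--b0\r\nContent-Type: text/plain\r\n\r\nhello\r\n--b0--\r\n"
chunks = []

callbacks = {
    # Called with (data, start, end) for each slice of part data.
    "on_part_data": lambda data, start, end: chunks.append(data[start:end]),
}
parser = MultipartParser(b"b0", callbacks)
parser.write(body)  # may be called repeatedly as network chunks arrive
assert b"".join(chunks) == b"hello"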

View File

@@ -921,6 +921,15 @@ def set_cors_headers(request: "SynapseRequest") -> None:
b"Access-Control-Expose-Headers",
b"Synapse-Trace-Id, Server, ETag",
)
elif request.experimental_cors_msc3886:
request.setHeader(
b"Access-Control-Allow-Headers",
b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
)
request.setHeader(
b"Access-Control-Expose-Headers",
b"ETag, Location, X-Max-Bytes",
)
else:
request.setHeader(
b"Access-Control-Allow-Headers",

View File

@@ -21,7 +21,6 @@
import contextlib
import logging
import time
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
import attr
@@ -95,6 +94,7 @@ class SynapseRequest(Request):
self.reactor = site.reactor
self._channel = channel # this is used by the tests
self.start_time = 0.0
self.experimental_cors_msc3886 = site.experimental_cors_msc3886
# The requester, if authenticated. For federation requests this is the
# server name, for client requests this is the Requester object.
@@ -140,41 +140,6 @@ class SynapseRequest(Request):
self.synapse_site.site_tag,
)
# Twisted machinery: this method is called by the Channel once the full request has
# been received, to dispatch the request to a resource.
#
# We're patching Twisted to bail/abort early when we see someone trying to upload
# `multipart/form-data` so we can avoid Twisted parsing the entire request body
# into memory (a problem specific to this `Content-Type`). This protects us
# from an attacker uploading something bigger than the available RAM and crashing
# the server with a `MemoryError`, or carefully blocking just enough resources to
# cause all other requests to fail.
#
# FIXME: This can be removed once Twisted releases a fix and we update to a
# patched version
def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
if command == b"POST":
ctype = self.requestHeaders.getRawHeaders(b"content-type")
if ctype and b"multipart/form-data" in ctype[0]:
self.method, self.uri = command, path
self.clientproto = version
self.code = HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value
self.code_message = bytes(
HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase, "ascii"
)
self.responseHeaders.setRawHeaders(b"content-length", [b"0"])
logger.warning(
"Aborting connection from %s because `content-type: multipart/form-data` is unsupported: %s %s",
self.client,
command,
path,
)
self.write(b"")
self.loseConnection()
return
return super().requestReceived(command, path, version)
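The decision in the removed method boils down to one predicate, which could be unit-tested on its own. A rough sketch (the function name is mine, not Synapse's):

from http import HTTPStatus
from typing import Optional

def should_abort_multipart(command: bytes, content_type: Optional[bytes]) -> bool:
    # Reject POSTed multipart bodies before Twisted buffers them in memory.
    return (
        command == b"POST"
        and content_type is not None
        and b"multipart/form-data" in content_type
    )

assert should_abort_multipart(b"POST", b"multipart/form-data; boundary=x")
assert not should_abort_multipart(b"GET", b"multipart/form-data")
assert HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value == 415  # the code sent back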
def handleContentChunk(self, data: bytes) -> None:
# we should have a `content` by now.
assert self.content, "handleContentChunk() called before gotLength()"
@@ -701,6 +666,10 @@ class SynapseSite(ProxySite):
request_id_header = config.http_options.request_id_header
self.experimental_cors_msc3886: bool = (
config.http_options.experimental_cors_msc3886
)
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,

View File

@@ -39,7 +39,7 @@ from twisted.internet.endpoints import (
)
from twisted.internet.interfaces import (
IPushProducer,
IReactorTime,
IReactorTCP,
IStreamClientEndpoint,
)
from twisted.internet.protocol import Factory, Protocol
@@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler):
port: int,
maximum_buffer: int = 1000,
level: int = logging.NOTSET,
_reactor: Optional[IReactorTime] = None,
_reactor: Optional[IReactorTCP] = None,
):
super().__init__(level=level)
self.host = host

View File

@@ -259,7 +259,7 @@ class MediaRepository:
"""
media = await self.store.get_local_media(media_id)
if media is None:
raise NotFoundError("Unknown media ID")
raise SynapseError(404, "Unknown media ID", errcode=Codes.NOT_FOUND)
if media.user_id != auth_user.to_string():
raise SynapseError(
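If memory serves, `NotFoundError` in `synapse.api.errors` is simply a `SynapseError` pre-filled with HTTP 404 and `M_NOT_FOUND`, so the two raises above should be behaviourally equivalent apart from the message text. A quick hedged check under that assumption:

from synapse.api.errors import Codes, NotFoundError, SynapseError

try:
    raise NotFoundError("Unknown media ID")
except SynapseError as e:
    assert e.code == 404
    assert e.errcode == Codes.NOT_FOUND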

View File

@@ -67,11 +67,6 @@ class ThumbnailError(Exception):
class Thumbnailer:
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
# Which image formats we allow Pillow to open.
# This should intentionally be kept restrictive, because the decoder of any
# format in this list becomes part of our trusted computing base.
PILLOW_FORMATS = ("jpeg", "png", "webp", "gif")
@staticmethod
def set_limits(max_image_pixels: int) -> None:
Image.MAX_IMAGE_PIXELS = max_image_pixels
@@ -81,7 +76,7 @@ class Thumbnailer:
self._closed = False
try:
self.image = Image.open(input_path, formats=self.PILLOW_FORMATS)
self.image = Image.open(input_path)
except OSError as e:
# If an error occurs opening the image, a thumbnail won't be able to
# be generated.
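The `formats` allow-list being removed above is a Pillow feature worth noting: `Image.open` accepts a `formats` tuple that limits which decoders may run, keeping every other format parser out of the trusted computing base. A small sketch of the defensive-open pattern (the function name and error handling are mine):

from PIL import Image, UnidentifiedImageError

ALLOWED_FORMATS = ("jpeg", "png", "webp", "gif")

def open_untrusted_image(path: str) -> Image.Image:
    try:
        # Only the allow-listed decoders ever see the input bytes.
        return Image.open(path, formats=ALLOWED_FORMATS)
    except UnidentifiedImageError:
        raise ValueError(f"unsupported or unrecognised image: {path}")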

View File

@@ -371,7 +371,7 @@ class BulkPushRuleEvaluator:
"Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]",
gather_results(
(
run_in_background( # type: ignore[call-overload]
run_in_background( # type: ignore[call-arg]
self.store.get_number_joined_users_in_room,
event.room_id, # type: ignore[arg-type]
),
@@ -382,10 +382,10 @@ class BulkPushRuleEvaluator:
event_id_to_event,
),
run_in_background(self._related_events, event),
run_in_background( # type: ignore[call-overload]
run_in_background( # type: ignore[call-arg]
self.store.get_subset_users_in_room_with_profiles,
event.room_id,
rules_by_user.keys(),
event.room_id, # type: ignore[arg-type]
rules_by_user.keys(), # type: ignore[arg-type]
),
),
consumeErrors=True,

View File

@@ -127,11 +127,6 @@ class HttpPusher(Pusher):
if self.data is None:
raise PusherConfigException("'data' key can not be null for HTTP pusher")
# Check if badge counts should be disabled for this push gateway
self.disable_badge_count = self.hs.config.experimental.msc4076_enabled and bool(
self.data.get("org.matrix.msc4076.disable_badge_count", False)
)
self.name = "%s/%s/%s" % (
pusher_config.user_name,
pusher_config.app_id,
@@ -466,10 +461,9 @@ class HttpPusher(Pusher):
content: JsonDict = {
"event_id": event.event_id,
"room_id": event.room_id,
"counts": {"unread": badge},
"prio": priority,
}
if not self.disable_badge_count:
content["counts"] = {"unread": badge}
# event_id_only doesn't include the tweaks, so override them.
tweaks = {}
else:
@@ -484,11 +478,11 @@ class HttpPusher(Pusher):
"type": event.type,
"sender": event.user_id,
"prio": priority,
}
if not self.disable_badge_count:
content["counts"] = {
"counts": {
"unread": badge,
}
# 'missed_calls': 2
},
}
if event.type == "m.room.member" and event.is_state():
content["membership"] = event.content["membership"]
content["user_is_target"] = event.state_key == self.user_id

View File

@@ -74,13 +74,9 @@ async def get_context_for_event(
room_state = []
if ev.content.get("membership") == Membership.INVITE:
invite_room_state = ev.unsigned.get("invite_room_state", [])
if isinstance(invite_room_state, list):
room_state = invite_room_state
room_state = ev.unsigned.get("invite_room_state", [])
elif ev.content.get("membership") == Membership.KNOCK:
knock_room_state = ev.unsigned.get("knock_room_state", [])
if isinstance(knock_room_state, list):
room_state = knock_room_state
room_state = ev.unsigned.get("knock_room_state", [])
# Ideally we'd reuse the logic in `calculate_room_name`, but that gets
# complicated to handle partial events vs pulling events from the DB.
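The removed isinstance guards matter because `unsigned` contents arrive over federation and are not schema-validated; a sketch of the defensive lookup (the helper name is mine):

from typing import Any, Dict, List

def stripped_state(unsigned: Dict[str, Any], key: str) -> List[Any]:
    # Accept the stripped-state field only when it really is a list;
    # anything else (dict, string, null) is treated as absent.
    value = unsigned.get(key, [])
    return value if isinstance(value, list) else []

assert stripped_state({"invite_room_state": [{"type": "m.room.name"}]},
                      "invite_room_state") == [{"type": "m.room.name"}]
assert stripped_state({"invite_room_state": "bogus"}, "invite_room_state") == []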

View File

@@ -495,7 +495,7 @@ class LockReleasedCommand(Command):
class NewActiveTaskCommand(_SimpleCommand):
"""Sent to inform instance handling background tasks that a new task is ready to run.
"""Sent to inform instance handling background tasks that a new active task is available to run.
Format::

View File

@@ -727,7 +727,7 @@ class ReplicationCommandHandler:
) -> None:
"""Called when get a new NEW_ACTIVE_TASK command."""
if self._task_scheduler:
self._task_scheduler.on_new_task(cmd.data)
self._task_scheduler.launch_task_by_id(cmd.data)
def new_connection(self, connection: IReplicationConnection) -> None:
"""Called when we have a new connection."""

View File

@@ -332,7 +332,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
ExperimentalFeaturesRestServlet(hs).register(http_server)
SuspendAccountRestServlet(hs).register(http_server)
if hs.config.experimental.msc3823_account_suspension:
SuspendAccountRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(

View File

@@ -43,15 +43,12 @@ class ExperimentalFeature(str, Enum):
MSC3881 = "msc3881"
MSC3575 = "msc3575"
MSC4222 = "msc4222"
def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
if self is ExperimentalFeature.MSC3881:
return config.experimental.msc3881_enabled
if self is ExperimentalFeature.MSC3575:
return config.experimental.msc3575_enabled
if self is ExperimentalFeature.MSC4222:
return config.experimental.msc4222_enabled
assert_never(self)
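The removed helper leans on `assert_never` for exhaustiveness: if a new enum member is added without a matching branch, mypy reports the fall-through instead of it surfacing at runtime. A freestanding sketch of the pattern (the config attribute names are illustrative):

from enum import Enum
from typing_extensions import assert_never

class Feature(str, Enum):
    MSC3881 = "msc3881"
    MSC3575 = "msc3575"

def is_globally_enabled(feature: Feature, config) -> bool:
    if feature is Feature.MSC3881:
        return config.msc3881_enabled
    if feature is Feature.MSC3575:
        return config.msc3575_enabled
    # mypy narrows `feature` to Never here; a new member without a branch
    # above turns this line into a type error.
    assert_never(feature)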

View File

@@ -114,19 +114,15 @@ class DeleteDevicesRestServlet(RestServlet):
else:
raise e
if requester.app_service and requester.app_service.msc4190_device_management:
# MSC4190 can skip UIA for this endpoint
pass
else:
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body.dict(exclude_unset=True),
"remove device(s) from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
can_skip_ui_auth=True,
)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body.dict(exclude_unset=True),
"remove device(s) from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
can_skip_ui_auth=True,
)
await self.device_handler.delete_devices(
requester.user.to_string(), body.devices
@@ -179,6 +175,9 @@ class DeviceRestServlet(RestServlet):
async def on_DELETE(
self, request: SynapseRequest, device_id: str
) -> Tuple[int, JsonDict]:
if self._msc3861_oauth_delegation_enabled:
raise UnrecognizedRequestError(code=404)
requester = await self.auth.get_user_by_req(request)
try:
@@ -193,24 +192,15 @@ class DeviceRestServlet(RestServlet):
else:
raise
if requester.app_service and requester.app_service.msc4190_device_management:
# MSC4190 allows appservices to delete devices through this endpoint without UIA
# It's also allowed with MSC3861 enabled
pass
else:
if self._msc3861_oauth_delegation_enabled:
raise UnrecognizedRequestError(code=404)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body.dict(exclude_unset=True),
"remove a device from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
can_skip_ui_auth=True,
)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body.dict(exclude_unset=True),
"remove a device from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
can_skip_ui_auth=True,
)
await self.device_handler.delete_devices(
requester.user.to_string(), [device_id]
@@ -226,16 +216,6 @@ class DeviceRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request, allow_guest=True)
body = parse_and_validate_json_object_from_request(request, self.PutBody)
# MSC4190 allows appservices to create devices through this endpoint
if requester.app_service and requester.app_service.msc4190_device_management:
created = await self.device_handler.upsert_device(
user_id=requester.user.to_string(),
device_id=device_id,
display_name=body.display_name,
)
return 201 if created else 200, {}
await self.device_handler.update_device(
requester.user.to_string(), device_id, body.dict()
)
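All three servlet changes above remove the same guard shape: MSC4190-flagged appservices skip user-interactive auth (UIA), while everyone else must complete it. Condensed into one hedged helper (the name is mine, not Synapse's):

async def maybe_require_uia(requester, request, body, auth_handler) -> None:
    if requester.app_service and requester.app_service.msc4190_device_management:
        # MSC4190 appservice: device management without UIA.
        return
    await auth_handler.validate_user_via_ui_auth(
        requester,
        request,
        body,
        "remove device(s) from your account",
        # Allow one UI-auth session to be reused across repeated calls.
        can_skip_ui_auth=True,
    )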

Some files were not shown because too many files have changed in this diff.