Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-07 01:20:16 +00:00.

Compare commits: madlittlem ... erikj/fix_ (74 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | b37ad05a62 |  |
|  | d739d0f71d |  |
|  | a58f09acc7 |  |
|  | cee9da0da5 |  |
|  | a9c4d1c8ac |  |
|  | 8c653e1dd6 |  |
|  | cd7d90bd28 |  |
|  | 02aa7adf4c |  |
|  | 3943d2fde7 |  |
|  | 93cc955051 |  |
|  | 4587decd67 |  |
|  | 4c67d20af7 |  |
|  | 80e39fd834 |  |
|  | 573bdbc824 |  |
|  | 79c02cada0 |  |
|  | 81b080f7a2 |  |
|  | 84ec15c47e |  |
|  | 0202e5f210 |  |
|  | f73edbe4d2 |  |
|  | ec4d136965 |  |
|  | ddd1d79d03 |  |
|  | d0a474d312 |  |
|  | 8291aa8fd7 |  |
|  | 1092a35a2a |  |
|  | c5e89f5fae |  |
|  | e918f683d4 |  |
|  | 4efd1056ca |  |
|  | 0f32408c80 |  |
|  | 9d837daa8a |  |
|  | d72843056b |  |
|  | e80dad5fa9 |  |
|  | 97284689ea |  |
|  | c812a79422 |  |
|  | 850ff14613 |  |
|  | e0fdb862cb |  |
|  | 73dc05c993 |  |
|  | bfb197c596 |  |
|  | f387f47a6a |  |
|  | a4c503674f |  |
|  | 2637b26cfe |  |
|  | db59067e78 |  |
|  | 7feb07c3e9 |  |
|  | 54e0086abd |  |
|  | 9916932e98 |  |
|  | f4943b875b |  |
|  | 92fcca8ed7 |  |
|  | c486ec8bc2 |  |
|  | 20fc9fcc33 |  |
|  | 2f41f6d947 |  |
|  | f377cee7ec |  |
|  | cacd4fd7bd |  |
|  | c7a1d0aa1a |  |
|  | c92639df21 |  |
|  | d0fc1e904a |  |
|  | 77eafd47df |  |
|  | 2a321bac35 |  |
|  | eda735e4bb |  |
|  | e1f5da65e1 |  |
|  | a4438c9bc1 |  |
|  | 9266ba72b5 |  |
|  | 61aadb158f |  |
|  | 75698a3e53 |  |
|  | 46bd7e136d |  |
|  | eac170b21b |  |
|  | 211c31dbd7 |  |
|  | 361bdafb87 |  |
|  | 1c2b18a704 |  |
|  | 2c9ed5e510 |  |
|  | 9c0a3963bc |  |
|  | 0932c77539 |  |
|  | 5580a820ae |  |
|  | 541a009564 |  |
|  | b5493899c5 |  |
|  | da7d71e2a2 |  |
@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.8 right now)
# is Python 3.9 right now)

trial_sqlite_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "sqlite",
"extras": "all",
}
@@ -53,12 +53,12 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
for version in ("3.10", "3.11", "3.12", "3.13")
)

trial_postgres_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -77,7 +77,7 @@ if not IS_PR:

trial_no_extra_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "sqlite",
"extras": "",
}
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)

# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs
# For each type of test we only run on bullseye on PRs


sytest_tests = [
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "postgres",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"reactor": "asyncio",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "postgres",
"reactor": "asyncio",
},
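
The hunks above edit the script that computes the CI job matrices (`trial_sqlite_tests`, `sytest_tests` and friends). As a rough illustration of the surrounding mechanism, and not part of this diff: the job lists end up serialised to JSON and written to `$GITHUB_OUTPUT` via the `set_output` helper referenced in the `@@ -99` hunk header. A minimal sketch, assuming that helper simply appends `key=value` lines to the output file:

```python
import json
import os


def set_output(key: str, value: str) -> None:
    # GitHub Actions reads step outputs from the file named in $GITHUB_OUTPUT.
    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
        fh.write(f"{key}={value}\n")


# Trimmed-down job definition mirroring one entry shown in the diff.
trial_sqlite_tests = [
    {"python-version": "3.9", "database": "sqlite", "extras": "all"},
]

# The workflow consumes this as a JSON test matrix.
set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```
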
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# this script is run by GitHub Actions in a plain `jammy` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
.github/workflows/latest_deps.yml (vendored): 4 changed lines

@@ -132,9 +132,9 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: bullseye

- sytest-tag: focal
- sytest-tag: bullseye
postgres: postgres
workers: workers
redis: redis
.github/workflows/release-artifacts.yml (vendored): 44 changed lines

@@ -91,10 +91,19 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache

- name: Artifact name
id: artifact-name
# We can't have colons in the upload name of the artifact, so we convert
# e.g. `debian:sid` to `sid`.
env:
DISTRO: ${{ matrix.distro }}
run: |
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"

- name: Upload debs as artifacts
uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
uses: actions/upload-artifact@v4
with:
name: debs
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*

build-wheels:
@@ -102,7 +111,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-12]
os: [ubuntu-22.04, macos-13]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -112,9 +121,9 @@ jobs:
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-12"
os: "macos-13"
# Don't build aarch64 wheels on mac.
- os: "macos-12"
- os: "macos-13"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
@@ -144,7 +153,7 @@ jobs:

- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -156,9 +165,9 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI

- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
- uses: actions/upload-artifact@v4
with:
name: Wheel
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl

build-sdist:
@@ -177,7 +186,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist

- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
- uses: actions/upload-artifact@v4
with:
name: Sdist
path: dist/*.tar.gz
@@ -194,19 +203,20 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
uses: actions/download-artifact@v4
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
# We need to merge all the debs uploads into one folder, then compress
# that.
run: |
mkdir debs
mv debs*/* debs/
tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
uses: softprops/action-gh-release@v2.0.5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
Wheel*/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}
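
One detail worth spelling out from the new `Artifact name` step above: artifact names may not contain colons, so the step keeps only the part of the distro tag after the first `:` using the shell parameter expansion `${DISTRO#*:}`. A hypothetical Python equivalent of that transformation, for illustration only:

```python
def artifact_name(distro: str) -> str:
    # Drop everything up to and including the first colon,
    # e.g. "debian:sid" -> "sid"; values without a colon are unchanged.
    return distro.split(":", 1)[-1]


assert artifact_name("debian:sid") == "sid"
assert artifact_name("ubuntu:jammy") == "jammy"
```
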
.github/workflows/tests.yml (vendored): 14 changed lines

@@ -397,7 +397,7 @@ jobs:
needs:
- linting-done
- changes
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4

@@ -409,12 +409,12 @@ jobs:
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python-dev \
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

- uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.9'

- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.8"]
python-version: ["pypy-3.9"]
extras: ["all"]

steps:
@@ -580,11 +580,11 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.8"
- python-version: "3.9"
postgres-version: "11"

- python-version: "3.11"
postgres-version: "15"
- python-version: "3.13"
postgres-version: "17"

services:
postgres:
.github/workflows/twisted_trunk.yml (vendored): 4 changed lines

@@ -99,11 +99,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:focal
image: matrixdotorg/sytest-synapse:bullseye
volumes:
- ${{ github.workspace }}:/src
CHANGES.md: 121 changed lines

@@ -1,3 +1,124 @@
# Synapse 1.120.0 (2024-11-26)

### Bugfixes

- Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments. ([\#17960](https://github.com/element-hq/synapse/issues/17960))


# Synapse 1.120.0rc1 (2024-11-20)

This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the
homeserver's media store.

Most homeservers operating in the public federation will not be impacted by this change, given that
the large homeserver `matrix.org` enabled this in September 2024 and therefore most clients and servers
will already have updated as a result.

Some server administrators may still wish to disable this enforcement for the time being, in the interest of compatibility with older clients
and older federated homeservers.
See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html#authenticated-media-is-now-enforced-by-default) for more information.

### Features

- Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. ([\#17889](https://github.com/element-hq/synapse/issues/17889))
- Add a one-off task to delete old One-Time Keys, to guard against us having old OTKs in the database that the client has long forgotten about. ([\#17934](https://github.com/element-hq/synapse/issues/17934))

### Improved Documentation

- Clarify the semantics of the `enable_authenticated_media` configuration option. ([\#17913](https://github.com/element-hq/synapse/issues/17913))
- Add documentation about backing up Synapse. ([\#17931](https://github.com/element-hq/synapse/issues/17931))

### Deprecations and Removals

- Remove support for [MSC3886: Simple client rendezvous capability](https://github.com/matrix-org/matrix-spec-proposals/pull/3886), which has been superseded by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) and therefore closed. ([\#17638](https://github.com/element-hq/synapse/issues/17638))

### Internal Changes

- Addressed some typos in docs and returned error message for unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865))
- Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923))
- Bump macOS version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924))
- Move server event filtering logic to Rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928))
- Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932))
- Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946))

### Updates to locked dependencies

* Bump anyhow from 1.0.92 to 1.0.93. ([\#17920](https://github.com/element-hq/synapse/issues/17920))
* Bump bleach from 6.1.0 to 6.2.0. ([\#17918](https://github.com/element-hq/synapse/issues/17918))
* Bump immutabledict from 4.2.0 to 4.2.1. ([\#17941](https://github.com/element-hq/synapse/issues/17941))
* Bump packaging from 24.1 to 24.2. ([\#17940](https://github.com/element-hq/synapse/issues/17940))
* Bump phonenumbers from 8.13.49 to 8.13.50. ([\#17942](https://github.com/element-hq/synapse/issues/17942))
* Bump pygithub from 2.4.0 to 2.5.0. ([\#17917](https://github.com/element-hq/synapse/issues/17917))
* Bump ruff from 0.7.2 to 0.7.3. ([\#17919](https://github.com/element-hq/synapse/issues/17919))
* Bump serde from 1.0.214 to 1.0.215. ([\#17938](https://github.com/element-hq/synapse/issues/17938))

# Synapse 1.119.0 (2024-11-13)

No significant changes since 1.119.0rc2.

### Python 3.8 support dropped

Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.

If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.


# Synapse 1.119.0rc2 (2024-11-11)

Note that due to packaging issues there was no v1.119.0rc1.


### Features

- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374))
- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888))

### Bugfixes

- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809))
- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839))
- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847))
- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850))
- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861))
- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903))
- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915))

### Internal Changes

- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908))
- Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786))
- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890))
- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830))
- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852))
- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884))
- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887))
- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894))
- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905))
- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921))

### Updates to locked dependencies

* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657))
* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901))
* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877))
* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853))
* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898))
* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899))
* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879))
* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874))
* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897))
* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900))
* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857))
* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855))
* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856))

# Synapse 1.118.0 (2024-10-29)

No significant changes since 1.118.0rc1.
Cargo.lock (generated): 174 changed lines

@@ -13,9 +13,9 @@ dependencies = [

[[package]]
name = "anyhow"
version = "1.0.91"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"

[[package]]
name = "arc-swap"
@@ -35,12 +35,6 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"

[[package]]
name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"

[[package]]
name = "blake2"
version = "0.10.6"
@@ -162,9 +156,9 @@ dependencies = [

[[package]]
name = "heck"
version = "0.4.1"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

[[package]]
name = "hex"
@@ -222,16 +216,6 @@ version = "0.2.154"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"

[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]

[[package]]
name = "log"
version = "0.4.22"
@@ -265,29 +249,6 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

[[package]]
name = "parking_lot"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
dependencies = [
"lock_api",
"parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets",
]

[[package]]
name = "portable-atomic"
version = "1.6.0"
@@ -311,16 +272,16 @@ dependencies = [

[[package]]
name = "pyo3"
version = "0.21.2"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8"
checksum = "f54b3d09cbdd1f8c20650b28e7b09e338881482f4aa908a5f61a00c98fba2690"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"once_cell",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -330,9 +291,9 @@ dependencies = [

[[package]]
name = "pyo3-build-config"
version = "0.21.2"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50"
checksum = "3015cf985888fe66cfb63ce0e321c603706cd541b7aec7ddd35c281390af45d8"
dependencies = [
"once_cell",
"target-lexicon",
@@ -340,9 +301,9 @@ dependencies = [

[[package]]
name = "pyo3-ffi"
version = "0.21.2"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403"
checksum = "6fca7cd8fd809b5ac4eefb89c1f98f7a7651d3739dfb341ca6980090f554c270"
dependencies = [
"libc",
"pyo3-build-config",
@@ -350,9 +311,9 @@ dependencies = [

[[package]]
name = "pyo3-log"
version = "0.10.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c"
checksum = "3eb421dc86d38d08e04b927b02424db480be71b777fa3a56f32e2f2a3a1a3b08"
dependencies = [
"arc-swap",
"log",
@@ -361,9 +322,9 @@ dependencies = [

[[package]]
name = "pyo3-macros"
version = "0.21.2"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c"
checksum = "34e657fa5379a79151b6ff5328d9216a84f55dc93b17b08e7c3609a969b73aa0"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -373,9 +334,9 @@ dependencies = [

[[package]]
name = "pyo3-macros-backend"
version = "0.21.2"
version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c"
checksum = "295548d5ffd95fd1981d2d3cf4458831b21d60af046b729b6fd143b0ba7aee2f"
dependencies = [
"heck",
"proc-macro2",
@@ -386,9 +347,9 @@ dependencies = [

[[package]]
name = "pythonize"
version = "0.21.1"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c"
checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff"
dependencies = [
"pyo3",
"serde",
@@ -433,15 +394,6 @@ dependencies = [
"getrandom",
]

[[package]]
name = "redox_syscall"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
dependencies = [
"bitflags",
]

[[package]]
name = "regex"
version = "1.11.1"
@@ -477,26 +429,20 @@ version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "serde"
version = "1.0.213"
version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1"
checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
dependencies = [
"serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.213"
version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5"
checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
dependencies = [
"proc-macro2",
"quote",
@@ -505,9 +451,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.132"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
"itoa",
"memchr",
@@ -537,12 +483,6 @@ dependencies = [
"digest",
]

[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"

[[package]]
name = "subtle"
version = "2.5.0"
@@ -694,67 +634,3 @@ dependencies = [
"js-sys",
"wasm-bindgen",
]

[[package]]
name = "windows-targets"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"

[[package]]
name = "windows_aarch64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"

[[package]]
name = "windows_i686_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"

[[package]]
name = "windows_i686_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"

[[package]]
name = "windows_i686_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"

[[package]]
name = "windows_x86_64_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"

[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"

[[package]]
name = "windows_x86_64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
changelog.d/17253.misc (Normal file): 1 line

@@ -0,0 +1 @@
[MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies.

@@ -1 +0,0 @@
Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API.

@@ -1 +0,0 @@
Add a test for downloading and thumbnailing a CMYK JPEG.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Avoid lost data on some database query retries.

@@ -1 +0,0 @@
Include the destination in the error of 'Destination mismatch' on federation requests.

@@ -1 +0,0 @@
Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi.

@@ -1,2 +0,0 @@
Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`.

@@ -1 +0,0 @@
Fix detection when the built Rust library was outdated when using source installations.

changelog.d/17872.doc (Normal file): 1 line

@@ -0,0 +1 @@
Add OIDC example configuration for Forgejo (fork of Gitea).

@@ -1 +0,0 @@
Minor speed-up of sliding sync by computing extensions results in parallel.

@@ -1 +0,0 @@
Remove usage of internal header encoding API.

changelog.d/17933.bugfix (Normal file): 1 line

@@ -0,0 +1 @@
Fix long-standing bug where read receipts could get overly delayed being sent over federation.

changelog.d/17936.misc (Normal file): 1 line

@@ -0,0 +1 @@
Fix incorrect comment in new schema delta.

changelog.d/17944.misc (Normal file): 1 line

@@ -0,0 +1 @@
Raise setuptools_rust version cap to 1.10.2.

changelog.d/17945.misc (Normal file): 1 line

@@ -0,0 +1 @@
Enable encrypted appservice related experimental features in the complement docker image.

changelog.d/17952.misc (Normal file): 1 line

@@ -0,0 +1 @@
Return whether the user is suspended when querying the user account in the Admin API.

changelog.d/17953.doc (Normal file): 1 line

@@ -0,0 +1 @@
Link to element-docker-demo from contrib/docker*.

changelog.d/17966.misc (Normal file): 1 line

@@ -0,0 +1 @@
Bump pyo3 and dependencies to v0.23.2.

changelog.d/17970.bugfix (Normal file): 1 line

@@ -0,0 +1 @@
Fix release process to not create duplicate releases.
@@ -30,3 +30,6 @@ docker-compose up -d
### More information

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)

**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see
https://github.com/element-hq/element-docker-demo**
@@ -8,6 +8,9 @@ All examples and snippets assume that your Synapse service is called `synapse` i

An example Docker Compose file can be found [here](docker-compose.yaml).

**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this
docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo**

## Worker Service Examples in Docker Compose

In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
debian/changelog (vendored): 30 changed lines

@@ -1,3 +1,33 @@
matrix-synapse-py3 (1.120.0) stable; urgency=medium

* New synapse release 1.120.0.

-- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2024 13:10:23 +0000

matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium

* New Synapse release 1.120.0rc1.

-- Synapse Packaging team <packages@matrix.org> Wed, 20 Nov 2024 15:02:21 +0000

matrix-synapse-py3 (1.119.0) stable; urgency=medium

* New Synapse release 1.119.0.

-- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000

matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium

* New Synapse release 1.119.0rc2.

-- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000

matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium

* New Synapse release 1.119.0rc1.

-- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700

matrix-synapse-py3 (1.118.0) stable; urgency=medium

* New Synapse release 1.118.0.

@@ -20,7 +20,7 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.

ARG PYTHON_VERSION=3.11
ARG PYTHON_VERSION=3.12

###
### Stage 0: generate requirements.txt

@@ -104,6 +104,16 @@ experimental_features:
msc3967_enabled: true
# Expose a room summary for public rooms
msc3266_enabled: true
# Send to-device messages to application services
msc2409_to_device_messages_enabled: true
# Allow application services to masquerade devices
msc3202_device_masquerading: true
# Sending device list changes, one-time key counts and fallback key usage to application services
msc3202_transaction_extensions: true
# Proxy OTK claim requests to exclusive ASes
msc3983_appservice_otk_claims: true
# Proxy key queries to exclusive ASes
msc3984_appservice_key_query: true

server_notices:
system_mxid_localpart: _server

@@ -54,6 +54,7 @@
- [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
- [Backups](usage/administration/backups.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)

@@ -5,6 +5,7 @@ basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).

@@ -55,7 +55,8 @@ It returns a JSON body like the following:
}
],
"user_type": null,
"locked": false
"locked": false,
"suspended": false
}
```


@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:

```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)


@@ -336,6 +336,36 @@ but it has a `response_types_supported` which excludes "code" (which we rely on,
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.

### Forgejo

Forgejo is a fork of Gitea that can act as an OAuth2 provider.

The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`.

Synapse config:

```yaml
oidc_providers:
  - idp_id: forgejo
    idp_name: Forgejo
    discover: false
    issuer: "https://your-forgejo.com/"
    client_id: "your-client-id" # TO BE FILLED
    client_secret: "your-client-secret" # TO BE FILLED
    client_auth_method: client_secret_post
    scopes: ["openid", "profile", "email", "groups"]
    authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize"
    token_endpoint: "https://your-forgejo.com/login/oauth/access_token"
    userinfo_endpoint: "https://your-forgejo.com/api/v1/user"
    user_mapping_provider:
      config:
        subject_claim: "sub"
        picture_claim: "picture"
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
        email_template: "{{ user.email }}"
```

### GitHub

[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but

@@ -100,6 +100,10 @@ database:
keepalives_count: 3
```

## Backups

Don't forget to [back up](./usage/administration/backups.md#database) your database!

## Tuning Postgres

The default settings should be fine for most deployments. For larger

@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.8 or later, up to Python 3.11.
- Python 3.9 or later, up to Python 3.13.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are
@@ -656,6 +656,10 @@ This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.

### Backups

Don't forget to take [backups](../usage/administration/backups.md) of your new server!

### Troubleshooting Installation

`pip` seems to leak *lots* of memory during installation. For instance, a Linux

@@ -117,6 +117,51 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.120.0

## Removal of experimental MSC3886 feature

[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
has been closed (and will not enter the Matrix spec). As such, we are
removing the experimental support for it in this release.

The `experimental_features.msc3886_endpoint` configuration option has
been removed.

## Authenticated media is now enforced by default

The [`enable_authenticated_media`] configuration option now defaults to true.

This means that clients and remote (federated) homeservers now need to use
the authenticated media endpoints in order to download media from your
homeserver.

As an exception, existing media that was stored on the server prior to
this option changing to `true` will still be accessible over the
unauthenticated endpoints.

The matrix.org homeserver has already been running with this option enabled
since September 2024, so most common clients and homeservers should already
be compatible.

With that said, administrators who wish to disable this feature for broader
compatibility can still do so by manually configuring
`enable_authenticated_media: False`.

[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media


# Upgrading to v1.119.0

## Minimum supported Python version

The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).

If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.


# Upgrading to v1.111.0

## New worker endpoints for authenticated client and federation media

docs/usage/administration/backups.md (Normal file): 125 lines

@@ -0,0 +1,125 @@
# How to back up a Synapse homeserver

It is critical to maintain good backups of your server, to guard against
hardware failure as well as potential corruption due to bugs or administrator
error.

This page documents the things you will need to consider backing up as part of
a Synapse installation.

## Configuration files

Keep a copy of your configuration file (`homeserver.yaml`), as well as any
auxiliary config files it refers to such as the
[`log_config`](../configuration/config_documentation.md#log_config) file,
[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files).
Often, all such config files will be kept in a single directory such as
`/etc/synapse`, which will make this easier.

## Server signing key

Your server has a [signing
key](../configuration/config_documentation.md#signing_key_path) which it uses
to sign events and outgoing federation requests. It is easiest to back it up
with your configuration files, but an alternative is to have Synapse create a
new signing key if you have to restore.

If you do decide to replace the signing key, you should add the old *public*
key to
[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys).

## Database

Synapse's support for SQLite is only suitable for testing purposes, so for the
purposes of this document, we'll assume you are using
[PostgreSQL](../../postgres.md).

A full discussion of backup strategies for PostgreSQL is out of scope for this
document; see the [PostgreSQL
documentation](https://www.postgresql.org/docs/current/backup.html) for
detailed information.

### Synapse-specific details

* Be very careful not to restore into a database that already has tables
present. At best, this will error; at worst, it will lead to subtle database
inconsistencies.

* The `e2e_one_time_keys_json` table should **not** be backed up, or if it is
backed up, should be
[`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html)
after restoring the database before Synapse is started.

[Background: restoring the database to an older backup can cause
used one-time-keys to be re-issued, causing subsequent [message decryption
errors](https://github.com/element-hq/element-meta/issues/2155). Clearing
all one-time-keys from the database ensures that this cannot happen, and
will prompt clients to generate and upload new one-time-keys.]
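
As an illustration of that post-restore step (an assumption about tooling, not something the documentation prescribes), the table could be cleared with a small `psycopg2` script before Synapse is started; the connection details below are placeholders:

```python
import psycopg2

# Placeholder connection settings; use the values from your database config.
conn = psycopg2.connect(dbname="synapse", user="synapse_user", host="localhost")
try:
    with conn, conn.cursor() as cur:
        # Discard any one-time keys carried over from the backup so that
        # previously-issued keys cannot be handed out a second time.
        cur.execute("TRUNCATE e2e_one_time_keys_json;")
finally:
    conn.close()
```
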
### Quick and easy database backup and restore

Typically, the easiest solution is to use `pg_dump` to take a copy of the whole
database. We recommend `pg_dump`'s custom dump format, as it produces
significantly smaller backup files.

```shell
sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump
```

There is no need to stop Postgres or Synapse while `pg_dump` is running: it
will take a consistent snapshot of the database.

To restore, you will need to recreate the database as described in [Using
Postgres](../../postgres.md#set-up-database),
then load the dump into it with `pg_restore`:

```shell
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres pg_restore -d synapse < synapse.dump
```

(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember
to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before
starting Synapse.)

To reiterate: do **not** restore a dump over an existing database.

Again, if you plan to run your homeserver at any sort of production level, we
recommend studying the PostgreSQL documentation on backup options.

## Media store

Synapse keeps a copy of media uploaded by users, including avatars and message
attachments, in its [Media
store](../configuration/config_documentation.md#media-store).

It is a directory on the local disk, containing the following directories:

* `local_content`: this is content uploaded by your local users. As a general
rule, you should back this up: it may represent the only copy of those
media files anywhere in the federation, and if they are lost, users will
see errors when viewing user or room avatars, and messages with attachments.

* `local_thumbnails`: "thumbnails" of images uploaded by your users. If
[`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails)
is enabled, these will be regenerated if they are removed from the disk, and
there is therefore no need to back them up.

If `dynamic_thumbnails` is *not* enabled (the default): although this can
theoretically be regenerated from `local_content`, there is no tooling to do
so. We recommend that these are backed up too.

* `remote_content`: this is a cache of content that was uploaded by a user on
another server, and has since been requested by a user on your own server.

Typically there is no need to back up this directory: if a file in this directory
is removed, Synapse will attempt to fetch it again from the remote
server.

* `remote_thumbnails`: thumbnails of images uploaded by users on other
servers. As with `remote_content`, there is normally no need to back this
up.

* `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded
by the [URL previews](../../setup/installation.md#url-previews) feature.
These do not need to be backed up.
@@ -1887,12 +1887,33 @@ Config options related to Synapse's media store.

When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.

In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this
case-by-case breakdown describes whether media downloads are permitted:

* `enable_authenticated_media = False`:
  * unauthenticated client or homeserver requesting local media: allowed
  * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache,
    or as long as the remote homeserver does not require authentication to retrieve the media
* `enable_authenticated_media = True`:
  * unauthenticated client or homeserver requesting local media:
    allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
    otherwise denied.
  * unauthenticated client or homeserver requesting remote media: the same as for local media;
    allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
    otherwise denied.

It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`,
will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`.
This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media;
those older clients or homeservers will not be cut off from media they can already see.

_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.

Example configuration:
```yaml
enable_authenticated_media: true
enable_authenticated_media: false
```
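To make the endpoint split described above concrete, here is a small illustrative sketch (not part of the documentation diff) of fetching the same media item over the legacy and the authenticated endpoints; the homeserver URL, media ID and access token are placeholders:

```python
# Illustrative sketch only: compare the legacy (unauthenticated) media
# endpoint with the authenticated one used by newer clients.
import requests  # assumed to be available; any HTTP client works

homeserver = "https://synapse.example.com"
media = "example.com/abcdef123456"  # <origin server>/<media id>, placeholder

# Legacy endpoint: media uploaded after enable_authenticated_media was turned
# on is not served here and yields a 404.
legacy = requests.get(f"{homeserver}/_matrix/media/v3/download/{media}")

# Authenticated endpoint: requires a client access token, and serves all
# media, including authenticated media.
authed = requests.get(
    f"{homeserver}/_matrix/client/v1/media/download/{media}",
    headers={"Authorization": "Bearer <access_token>"},
)
print(legacy.status_code, authed.status_code)
```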
---
### `enable_media_repo`

@@ -3108,6 +3129,15 @@ it was last used.
It is possible to build an entry from an old `signing.key` file using the
`export_signing_key` script which is provided with synapse.

If you have lost the private key file, you can ask another server you trust to
tell you the public keys it has seen from your server. To fetch the keys from
`matrix.org`, try something like:

```
curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
    jq '.server_keys | map(.verify_keys) | add'
```

Example configuration:
```yaml
old_signing_keys:
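  # Illustrative sketch only (not part of the diff above): an entry under
  # `old_signing_keys` typically maps a key ID to its base64-encoded public
  # key and the millisecond timestamp at which it was retired. The key ID,
  # key material and timestamp below are placeholders.
  "ed25519:old_key_id":
    key: "base64 encoded public signing key"
    expired_ts: 1000000000000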
@@ -4371,9 +4401,9 @@ It is possible to scale the processes that handle sending outbound federation re
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `federation_sender_instances` map. Doing so will remove handling of this function from
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.

The way that the load balancing works is any outbound federation request will be assigned
to a federation sender worker based on the hash of the destination server name. This
means that all requests being sent to the same destination will be processed by the same
worker instance. Multiple `federation_sender_instances` are useful if there is a federation
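For illustration, a minimal sketch of such a configuration follows; the worker names are placeholders and must match the `worker_name` of running `generic_worker` instances:

```yaml
# Minimal sketch, not part of the diff above: two federation sender instances
# share the outbound load; requests to a given destination always hash to the
# same instance. Worker names are placeholders.
federation_sender_instances:
  - federation_sender1
  - federation_sender2
```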
@@ -4730,7 +4760,7 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
  for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match
  the "worker_name". If not set or `null`, invites will be accepted on the
  main process. (A minimal configuration sketch follows below.)

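A minimal configuration sketch for the sub-options listed above (the top-level `auto_accept_invites` key, the `enabled` flag and the worker name are assumptions for illustration; only the three sub-options are taken from the documentation above):

```yaml
# Illustrative sketch only: accept invites automatically, but only for direct
# messages from local users, handled on an assumed worker named "worker1".
auto_accept_invites:
  enabled: true
  only_for_direct_messages: true
  only_from_local_users: true
  worker_to_run_on: "worker1"
```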
56
flake.lock
generated
@@ -56,24 +56,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681202837,
|
||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
@@ -186,27 +168,27 @@
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1690535733,
|
||||
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
|
||||
"lastModified": 1729265718,
|
||||
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
|
||||
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "master",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681358109,
|
||||
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
|
||||
"lastModified": 1728538411,
|
||||
"narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
|
||||
"rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -249,20 +231,19 @@
|
||||
"devenv": "devenv",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"rust-overlay": "rust-overlay",
|
||||
"systems": "systems_3"
|
||||
"systems": "systems_2"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": "nixpkgs_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1693966243,
|
||||
"narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
|
||||
"lastModified": 1731897198,
|
||||
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
|
||||
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -300,21 +281,6 @@
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
|
||||
26
flake.nix
@@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
@@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# availabile to you!
# available to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
@@ -39,9 +39,9 @@

{
  inputs = {
    # Use the master/unstable branch of nixpkgs. Used to fetch the latest
    # Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
    # available versions of packages.
    nixpkgs.url = "github:NixOS/nixpkgs/master";
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.
@@ -50,7 +50,7 @@
    rust-overlay.url = "github:oxalica/rust-overlay";
  };

  outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
  outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
    let
      forEachSystem = nixpkgs.lib.genAttrs (import systems);
    in {
@@ -82,7 +82,7 @@
            #
            # NOTE: We currently need to set the Rust version unnecessarily high
            # in order to work around https://github.com/matrix-org/synapse/issues/15939
            (rust-bin.stable."1.71.1".default.override {
            (rust-bin.stable."1.82.0".default.override {
              # Additionally install the "rust-src" extension to allow diving into the
              # Rust source code in an IDE (rust-analyzer will also make use of it).
              extensions = [ "rust-src" ];
@@ -126,7 +126,7 @@
          # Automatically activate the poetry virtualenv upon entering the shell.
          languages.python.poetry.activate.enable = true;
          # Install all extra Python dependencies; this is needed to run the unit
          # tests and utilitise all Synapse features.
          # tests and utilise all Synapse features.
          languages.python.poetry.install.arguments = ["--extras all"];
          # Install the 'matrix-synapse' package from the local checkout.
          languages.python.poetry.install.installRootPackage = true;
@@ -163,8 +163,8 @@
            # Create a postgres user called 'synapse_user' which has ownership
            # over the 'synapse' database.
            services.postgres.initialScript = ''
              CREATE USER synapse_user;
              ALTER DATABASE synapse OWNER TO synapse_user;
            '';

            # Redis is needed in order to run Synapse in worker mode.
@@ -205,7 +205,7 @@
            # corresponding Nix packages on https://search.nixos.org/packages.
            #
            # This was done until `./install-deps.pl --dryrun` produced no output.
            env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
            env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [
              DBI
              ClassMethodModifiers
              CryptEd25519

2
mypy.ini
@@ -26,7 +26,7 @@ strict_equality = True

# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.8
python_version = 3.9

files =
  docker/,
139
poetry.lock
generated
@@ -11,9 +11,6 @@ files = [
|
||||
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
|
||||
|
||||
[[package]]
|
||||
name = "attrs"
|
||||
version = "24.2.0"
|
||||
@@ -107,21 +104,20 @@ typecheck = ["mypy"]
|
||||
|
||||
[[package]]
|
||||
name = "bleach"
|
||||
version = "6.1.0"
|
||||
version = "6.2.0"
|
||||
description = "An easy safelist-based HTML-sanitizing tool."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = ">=3.9"
|
||||
files = [
|
||||
{file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
|
||||
{file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
|
||||
{file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
|
||||
{file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
six = ">=1.9.0"
|
||||
webencodings = "*"
|
||||
|
||||
[package.extras]
|
||||
css = ["tinycss2 (>=1.1.0,<1.3)"]
|
||||
css = ["tinycss2 (>=1.1.0,<1.5)"]
|
||||
|
||||
[[package]]
|
||||
name = "canonicaljson"
|
||||
@@ -728,13 +724,13 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "immutabledict"
|
||||
version = "4.2.0"
|
||||
version = "4.2.1"
|
||||
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"},
|
||||
{file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"},
|
||||
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
|
||||
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -874,9 +870,7 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
attrs = ">=22.2.0"
|
||||
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
|
||||
jsonschema-specifications = ">=2023.03.6"
|
||||
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
|
||||
referencing = ">=0.28.4"
|
||||
rpds-py = ">=0.7.1"
|
||||
|
||||
@@ -896,7 +890,6 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
|
||||
referencing = ">=0.28.0"
|
||||
|
||||
[[package]]
|
||||
@@ -912,7 +905,6 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
|
||||
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
|
||||
"jaraco.classes" = "*"
|
||||
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
|
||||
pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
|
||||
@@ -1380,17 +1372,17 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "mypy-zope"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
description = "Plugin for mypy to support zope interfaces"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
|
||||
{file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
|
||||
{file = "mypy_zope-1.0.8-py3-none-any.whl", hash = "sha256:8794a77dae0c7e2f28b8ac48569091310b3ee45bb9d6cd4797dcb837c40f9976"},
|
||||
{file = "mypy_zope-1.0.8.tar.gz", hash = "sha256:854303a95aefc4289e8a0796808e002c2c7ecde0a10a8f7b8f48092f94ef9b9f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
mypy = ">=1.0.0,<1.12.0"
|
||||
mypy = ">=1.0.0,<1.13.0"
|
||||
"zope.interface" = "*"
|
||||
"zope.schema" = "*"
|
||||
|
||||
@@ -1426,13 +1418,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
|
||||
|
||||
[[package]]
|
||||
name = "packaging"
|
||||
version = "24.1"
|
||||
version = "24.2"
|
||||
description = "Core utilities for Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
|
||||
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
|
||||
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
|
||||
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1451,13 +1443,13 @@ dev = ["jinja2"]
|
||||
|
||||
[[package]]
|
||||
name = "phonenumbers"
|
||||
version = "8.13.48"
|
||||
version = "8.13.50"
|
||||
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "phonenumbers-8.13.48-py2.py3-none-any.whl", hash = "sha256:5c51939acefa390eb74119750afb10a85d3c628dc83fd62c52d6f532fcf5d205"},
|
||||
{file = "phonenumbers-8.13.48.tar.gz", hash = "sha256:62d8df9b0f3c3c41571c6b396f044ddd999d61631534001b8be7fdf7ba1b18f3"},
|
||||
{file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"},
|
||||
{file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1571,17 +1563,6 @@ files = [
|
||||
[package.extras]
|
||||
testing = ["pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "pkgutil-resolve-name"
|
||||
version = "1.3.10"
|
||||
description = "Resolve a name to an object."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
|
||||
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prometheus-client"
|
||||
version = "0.21.0"
|
||||
@@ -1803,13 +1784,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pygithub"
|
||||
version = "2.4.0"
|
||||
version = "2.5.0"
|
||||
description = "Use the full Github API v3"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"},
|
||||
{file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"},
|
||||
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
|
||||
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1948,7 +1929,6 @@ files = [
|
||||
[package.dependencies]
|
||||
cryptography = ">=3.1"
|
||||
defusedxml = "*"
|
||||
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
|
||||
pyopenssl = "*"
|
||||
python-dateutil = "*"
|
||||
pytz = "*"
|
||||
@@ -2164,7 +2144,6 @@ files = [
|
||||
[package.dependencies]
|
||||
markdown-it-py = ">=2.2.0,<3.0.0"
|
||||
pygments = ">=2.13.0,<3.0.0"
|
||||
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
|
||||
|
||||
[package.extras]
|
||||
jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
||||
@@ -2277,29 +2256,29 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.7.1"
|
||||
version = "0.7.3"
|
||||
description = "An extremely fast Python linter and code formatter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"},
|
||||
{file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"},
|
||||
{file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"},
|
||||
{file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"},
|
||||
{file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"},
|
||||
{file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"},
|
||||
{file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"},
|
||||
{file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"},
|
||||
{file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"},
|
||||
{file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"},
|
||||
{file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"},
|
||||
{file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"},
|
||||
{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
|
||||
{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
|
||||
{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
|
||||
{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
|
||||
{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
|
||||
{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
|
||||
{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
|
||||
{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
|
||||
{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2536,33 +2515,33 @@ twisted = ["twisted"]
|
||||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.0.2"
|
||||
version = "2.1.0"
|
||||
description = "A lil' TOML parser"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
|
||||
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
|
||||
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
|
||||
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tornado"
|
||||
version = "6.4.1"
|
||||
version = "6.4.2"
|
||||
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
|
||||
{file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
|
||||
{file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"},
|
||||
{file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"},
|
||||
{file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3121,5 +3100,5 @@ user-search = ["pyicu"]
|
||||
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.8.0"
|
||||
content-hash = "aa1f6d97809596c23a6d160c0c5804971dad0ba49e34b137bbfb79df038fe6f0"
|
||||
python-versions = "^3.9.0"
|
||||
content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"
|
||||
|
||||
@@ -36,7 +36,7 @@

[tool.ruff]
line-length = 88
target-version = "py38"
target-version = "py39"

[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e

@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.118.0"
version = "1.120.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"

@@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.8.0"
python = "^3.9.0"

# Mandatory Dependencies
# ----------------------
@@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.13"
PyYAML = ">=5.3"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"

@@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.2.0", optional = true }
lxml = { version = ">=4.5.2", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }

@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.7.1"
ruff = "0.7.3"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"]
build-backend = "poetry.core.masonry.api"

@@ -378,13 +378,13 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - CPython 3.6, 3.7 and 3.8: EOLed
# - PyPy 3.7 and 3.8: we only support Python 3.9+
# - musllinux i686: excluded to reduce number of wheels we build.
#   c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
#   c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -30,14 +30,14 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
pyo3 = { version = "0.21.0", features = [
pyo3 = { version = "0.23.2", features = [
    "macros",
    "anyhow",
    "abi3",
    "abi3-py38",
] }
pyo3-log = "0.10.0"
pythonize = "0.21.0"
pyo3-log = "0.12.0"
pythonize = "0.23.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
@@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType};
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "acl")?;
|
||||
let child_module = PyModule::new(py, "acl")?;
|
||||
child_module.add_class::<ServerAclEvaluator>()?;
|
||||
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import acl` work.
|
||||
py.import_bound("sys")?
|
||||
py.import("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.acl", child_module)?;
|
||||
|
||||
|
||||
107
rust/src/events/filter.rs
Normal file
107
rust/src/events/filter.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use pyo3::{exceptions::PyValueError, pyfunction, PyResult};
|
||||
|
||||
use crate::{
|
||||
identifier::UserID,
|
||||
matrix_const::{
|
||||
HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN,
|
||||
},
|
||||
};
|
||||
|
||||
#[pyfunction(name = "event_visible_to_server")]
|
||||
pub fn event_visible_to_server_py(
|
||||
sender: String,
|
||||
target_server_name: String,
|
||||
history_visibility: String,
|
||||
erased_senders: HashMap<String, bool>,
|
||||
partial_state_invisible: bool,
|
||||
memberships: Vec<(String, String)>, // (state_key, membership)
|
||||
) -> PyResult<bool> {
|
||||
event_visible_to_server(
|
||||
sender,
|
||||
target_server_name,
|
||||
history_visibility,
|
||||
erased_senders,
|
||||
partial_state_invisible,
|
||||
memberships,
|
||||
)
|
||||
.map_err(|e| PyValueError::new_err(format!("{e}")))
|
||||
}
|
||||
|
||||
/// Return whether the target server is allowed to see the event.
|
||||
///
|
||||
/// For a fully stated room, the target server is allowed to see an event E if:
|
||||
/// - the state at E has world readable or shared history vis, OR
|
||||
/// - the state at E says that the target server is in the room.
|
||||
///
|
||||
/// For a partially stated room, the target server is allowed to see E if:
|
||||
/// - E was created by this homeserver, AND:
|
||||
/// - the partial state at E has world readable or shared history vis, OR
|
||||
/// - the partial state at E says that the target server is in the room.
|
||||
pub fn event_visible_to_server(
|
||||
sender: String,
|
||||
target_server_name: String,
|
||||
history_visibility: String,
|
||||
erased_senders: HashMap<String, bool>,
|
||||
partial_state_invisible: bool,
|
||||
memberships: Vec<(String, String)>, // (state_key, membership)
|
||||
) -> anyhow::Result<bool> {
|
||||
if let Some(&erased) = erased_senders.get(&sender) {
|
||||
if erased {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
if partial_state_invisible {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
if history_visibility != HISTORY_VISIBILITY_INVITED
|
||||
&& history_visibility != HISTORY_VISIBILITY_JOINED
|
||||
{
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let mut visible = false;
|
||||
for (state_key, membership) in memberships {
|
||||
let state_key = UserID::try_from(state_key.as_ref())
|
||||
.map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?;
|
||||
if state_key.server_name() != target_server_name {
|
||||
return Err(anyhow::anyhow!(
|
||||
"state_key.server_name ({}) does not match target_server_name ({target_server_name})",
|
||||
state_key.server_name()
|
||||
));
|
||||
}
|
||||
|
||||
match membership.as_str() {
|
||||
MEMBERSHIP_INVITE => {
|
||||
if history_visibility == HISTORY_VISIBILITY_INVITED {
|
||||
visible = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
MEMBERSHIP_JOIN => {
|
||||
visible = true;
|
||||
break;
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(visible)
|
||||
}
|
||||
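To make the visibility rules in the doc comment above concrete, here is a small illustrative sketch (not part of the diff) of calling the Python binding that this file registers as `synapse.synapse_rust.events.event_visible_to_server`; the user IDs and server names are placeholders:

```python
# Illustrative sketch only: check whether other.example.org may see an event
# sent by @alice:example.com in a room with "joined" history visibility,
# given that a user from that server is joined. Names are placeholders.
from synapse.synapse_rust.events import event_visible_to_server

visible = event_visible_to_server(
    sender="@alice:example.com",
    target_server_name="other.example.org",
    history_visibility="joined",
    erased_senders={"@alice:example.com": False},
    partial_state_invisible=False,
    memberships=[("@bob:other.example.org", "join")],
)
assert visible is True  # a joined member of the target server can see it
```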
@@ -41,9 +41,11 @@ use pyo3::{
|
||||
pybacked::PyBackedStr,
|
||||
pyclass, pymethods,
|
||||
types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
|
||||
Bound, IntoPy, PyAny, PyObject, PyResult, Python,
|
||||
Bound, IntoPyObject, PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
|
||||
use crate::UnwrapInfallible;
|
||||
|
||||
/// Definitions of the various fields of the internal metadata.
|
||||
#[derive(Clone)]
|
||||
enum EventInternalMetadataData {
|
||||
@@ -60,31 +62,59 @@ enum EventInternalMetadataData {
|
||||
|
||||
impl EventInternalMetadataData {
|
||||
/// Convert the field to its name and python object.
|
||||
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
|
||||
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) {
|
||||
match self {
|
||||
EventInternalMetadataData::OutOfBandMembership(o) => {
|
||||
(pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::SendOnBehalfOf(o) => {
|
||||
(pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::RecheckRedaction(o) => {
|
||||
(pyo3::intern!(py, "recheck_redaction"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::SoftFailed(o) => {
|
||||
(pyo3::intern!(py, "soft_failed"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::ProactivelySend(o) => {
|
||||
(pyo3::intern!(py, "proactively_send"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::Redacted(o) => {
|
||||
(pyo3::intern!(py, "redacted"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)),
|
||||
EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)),
|
||||
EventInternalMetadataData::DeviceId(o) => {
|
||||
(pyo3::intern!(py, "device_id"), o.into_py(py))
|
||||
}
|
||||
EventInternalMetadataData::OutOfBandMembership(o) => (
|
||||
pyo3::intern!(py, "out_of_band_membership"),
|
||||
o.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.to_owned()
|
||||
.into_any(),
|
||||
),
|
||||
EventInternalMetadataData::SendOnBehalfOf(o) => (
|
||||
pyo3::intern!(py, "send_on_behalf_of"),
|
||||
o.into_pyobject(py).unwrap_infallible().into_any(),
|
||||
),
|
||||
EventInternalMetadataData::RecheckRedaction(o) => (
|
||||
pyo3::intern!(py, "recheck_redaction"),
|
||||
o.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.to_owned()
|
||||
.into_any(),
|
||||
),
|
||||
EventInternalMetadataData::SoftFailed(o) => (
|
||||
pyo3::intern!(py, "soft_failed"),
|
||||
o.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.to_owned()
|
||||
.into_any(),
|
||||
),
|
||||
EventInternalMetadataData::ProactivelySend(o) => (
|
||||
pyo3::intern!(py, "proactively_send"),
|
||||
o.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.to_owned()
|
||||
.into_any(),
|
||||
),
|
||||
EventInternalMetadataData::Redacted(o) => (
|
||||
pyo3::intern!(py, "redacted"),
|
||||
o.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.to_owned()
|
||||
.into_any(),
|
||||
),
|
||||
EventInternalMetadataData::TxnId(o) => (
|
||||
pyo3::intern!(py, "txn_id"),
|
||||
o.into_pyobject(py).unwrap_infallible().into_any(),
|
||||
),
|
||||
EventInternalMetadataData::TokenId(o) => (
|
||||
pyo3::intern!(py, "token_id"),
|
||||
o.into_pyobject(py).unwrap_infallible().into_any(),
|
||||
),
|
||||
EventInternalMetadataData::DeviceId(o) => (
|
||||
pyo3::intern!(py, "device_id"),
|
||||
o.into_pyobject(py).unwrap_infallible().into_any(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -247,7 +277,7 @@ impl EventInternalMetadata {
|
||||
///
|
||||
/// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
|
||||
fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
|
||||
let dict = PyDict::new_bound(py);
|
||||
let dict = PyDict::new(py);
|
||||
|
||||
for entry in &self.data {
|
||||
let (key, value) = entry.to_python_pair(py);
|
||||
|
||||
@@ -22,21 +22,23 @@
|
||||
|
||||
use pyo3::{
|
||||
types::{PyAnyMethods, PyModule, PyModuleMethods},
|
||||
Bound, PyResult, Python,
|
||||
wrap_pyfunction, Bound, PyResult, Python,
|
||||
};
|
||||
|
||||
pub mod filter;
|
||||
mod internal_metadata;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "events")?;
|
||||
let child_module = PyModule::new(py, "events")?;
|
||||
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
|
||||
child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?;
|
||||
|
||||
m.add_submodule(&child_module)?;
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import events` work.
|
||||
py.import_bound("sys")?
|
||||
py.import("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.events", child_module)?;
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request
|
||||
let headers_iter = request
|
||||
.getattr("requestHeaders")?
|
||||
.call_method0("getAllRawHeaders")?
|
||||
.iter()?;
|
||||
.try_iter()?;
|
||||
|
||||
for header in headers_iter {
|
||||
let header = header?;
|
||||
|
||||
86
rust/src/identifier.rs
Normal file
86
rust/src/identifier.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
//! # Matrix Identifiers
|
||||
//!
|
||||
//! This module contains definitions and utilities for working with matrix identifiers.
|
||||
|
||||
use std::{fmt, ops::Deref};
|
||||
|
||||
/// Errors that can occur when parsing a matrix identifier.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum IdentifierError {
|
||||
IncorrectSigil,
|
||||
MissingColon,
|
||||
}
|
||||
|
||||
impl fmt::Display for IdentifierError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{:?}", self)
|
||||
}
|
||||
}
|
||||
|
||||
/// A Matrix user_id.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct UserID(String);
|
||||
|
||||
impl UserID {
|
||||
/// Returns the `localpart` of the user_id.
|
||||
pub fn localpart(&self) -> &str {
|
||||
&self[1..self.colon_pos()]
|
||||
}
|
||||
|
||||
/// Returns the `server_name` / `domain` of the user_id.
|
||||
pub fn server_name(&self) -> &str {
|
||||
&self[self.colon_pos() + 1..]
|
||||
}
|
||||
|
||||
/// Returns the position of the ':' inside of the user_id.
|
||||
/// Used when splitting the user_id into its respective parts.
|
||||
fn colon_pos(&self) -> usize {
|
||||
self.find(':').unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for UserID {
|
||||
type Error = IdentifierError;
|
||||
|
||||
/// Will try creating a `UserID` from the provided `&str`.
|
||||
/// Can fail if the user_id is incorrectly formatted.
|
||||
fn try_from(s: &str) -> Result<Self, Self::Error> {
|
||||
if !s.starts_with('@') {
|
||||
return Err(IdentifierError::IncorrectSigil);
|
||||
}
|
||||
|
||||
if s.find(':').is_none() {
|
||||
return Err(IdentifierError::MissingColon);
|
||||
}
|
||||
|
||||
Ok(UserID(s.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for UserID {
|
||||
type Target = str;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for UserID {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::convert::Infallible;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3_log::ResetHandle;
|
||||
@@ -6,6 +8,8 @@ pub mod acl;
|
||||
pub mod errors;
|
||||
pub mod events;
|
||||
pub mod http;
|
||||
pub mod identifier;
|
||||
pub mod matrix_const;
|
||||
pub mod push;
|
||||
pub mod rendezvous;
|
||||
|
||||
@@ -50,3 +54,16 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub trait UnwrapInfallible<T> {
|
||||
fn unwrap_infallible(self) -> T;
|
||||
}
|
||||
|
||||
impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
|
||||
fn unwrap_infallible(self) -> T {
|
||||
match self {
|
||||
Ok(val) => val,
|
||||
Err(never) => match never {},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
28
rust/src/matrix_const.rs
Normal file
28
rust/src/matrix_const.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
*
|
||||
* Copyright (C) 2024 New Vector, Ltd
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as
|
||||
* published by the Free Software Foundation, either version 3 of the
|
||||
* License, or (at your option) any later version.
|
||||
*
|
||||
* See the GNU Affero General Public License for more details:
|
||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
*/
|
||||
|
||||
//! # Matrix Constants
|
||||
//!
|
||||
//! This module contains definitions for constant values described by the matrix specification.
|
||||
|
||||
pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable";
|
||||
pub const HISTORY_VISIBILITY_SHARED: &str = "shared";
|
||||
pub const HISTORY_VISIBILITY_INVITED: &str = "invited";
|
||||
pub const HISTORY_VISIBILITY_JOINED: &str = "joined";
|
||||
|
||||
pub const MEMBERSHIP_BAN: &str = "ban";
|
||||
pub const MEMBERSHIP_LEAVE: &str = "leave";
|
||||
pub const MEMBERSHIP_KNOCK: &str = "knock";
|
||||
pub const MEMBERSHIP_INVITE: &str = "invite";
|
||||
pub const MEMBERSHIP_JOIN: &str = "join";
|
||||
@@ -167,6 +167,7 @@ impl PushRuleEvaluator {
|
||||
///
|
||||
/// Returns the set of actions, if any, that match (filtering out any
|
||||
/// `dont_notify` and `coalesce` actions).
|
||||
#[pyo3(signature = (push_rules, user_id=None, display_name=None))]
|
||||
pub fn run(
|
||||
&self,
|
||||
push_rules: &FilteredPushRules,
|
||||
@@ -236,6 +237,7 @@ impl PushRuleEvaluator {
|
||||
}
|
||||
|
||||
/// Check if the given condition matches.
|
||||
#[pyo3(signature = (condition, user_id=None, display_name=None))]
|
||||
fn matches(
|
||||
&self,
|
||||
condition: Condition,
|
||||
|
||||
@@ -65,8 +65,8 @@ use anyhow::{Context, Error};
|
||||
use log::warn;
|
||||
use pyo3::exceptions::PyTypeError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyBool, PyList, PyLong, PyString};
|
||||
use pythonize::{depythonize_bound, pythonize};
|
||||
use pyo3::types::{PyBool, PyInt, PyList, PyString};
|
||||
use pythonize::{depythonize, pythonize, PythonizeError};
|
||||
use serde::de::Error as _;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -79,7 +79,7 @@ pub mod utils;
|
||||
|
||||
/// Called when registering modules with python.
|
||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
let child_module = PyModule::new_bound(py, "push")?;
|
||||
let child_module = PyModule::new(py, "push")?;
|
||||
child_module.add_class::<PushRule>()?;
|
||||
child_module.add_class::<PushRules>()?;
|
||||
child_module.add_class::<FilteredPushRules>()?;
|
||||
@@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>
|
||||
|
||||
// We need to manually add the module to sys.modules to make `from
|
||||
// synapse.synapse_rust import push` work.
|
||||
py.import_bound("sys")?
|
||||
py.import("sys")?
|
||||
.getattr("modules")?
|
||||
.set_item("synapse.synapse_rust.push", child_module)?;
|
||||
|
||||
@@ -182,12 +182,16 @@ pub enum Action {
|
||||
Unknown(Value),
|
||||
}
|
||||
|
||||
impl IntoPy<PyObject> for Action {
|
||||
fn into_py(self, py: Python<'_>) -> PyObject {
|
||||
impl<'py> IntoPyObject<'py> for Action {
|
||||
type Target = PyAny;
|
||||
type Output = Bound<'py, Self::Target>;
|
||||
type Error = PythonizeError;
|
||||
|
||||
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
|
||||
// When we pass the `Action` struct to Python we want it to be converted
|
||||
// to a dict. We use `pythonize`, which converts the struct using the
|
||||
// `serde` serialization.
|
||||
pythonize(py, &self).expect("valid action")
|
||||
pythonize(py, &self)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -270,13 +274,13 @@ pub enum SimpleJsonValue {
|
||||
}
|
||||
|
||||
impl<'source> FromPyObject<'source> for SimpleJsonValue {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
|
||||
if let Ok(s) = ob.downcast::<PyString>() {
|
||||
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
|
||||
// A bool *is* an int, ensure we try bool first.
|
||||
} else if let Ok(b) = ob.downcast::<PyBool>() {
|
||||
Ok(SimpleJsonValue::Bool(b.extract()?))
|
||||
} else if let Ok(i) = ob.downcast::<PyLong>() {
|
||||
} else if let Ok(i) = ob.downcast::<PyInt>() {
|
||||
Ok(SimpleJsonValue::Int(i.extract()?))
|
||||
} else if ob.is_none() {
|
||||
Ok(SimpleJsonValue::Null)
|
||||
@@ -298,15 +302,19 @@ pub enum JsonValue {
|
||||
}
|
||||
|
||||
impl<'source> FromPyObject<'source> for JsonValue {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
|
||||
if let Ok(l) = ob.downcast::<PyList>() {
|
||||
match l.iter().map(SimpleJsonValue::extract).collect() {
|
||||
match l
|
||||
.iter()
|
||||
.map(|it| SimpleJsonValue::extract_bound(&it))
|
||||
.collect()
|
||||
{
|
||||
Ok(a) => Ok(JsonValue::Array(a)),
|
||||
Err(e) => Err(PyTypeError::new_err(format!(
|
||||
"Can't convert to JsonValue::Array: {e}"
|
||||
))),
|
||||
}
|
||||
} else if let Ok(v) = SimpleJsonValue::extract(ob) {
|
||||
} else if let Ok(v) = SimpleJsonValue::extract_bound(ob) {
|
||||
Ok(JsonValue::Value(v))
|
||||
} else {
|
||||
Err(PyTypeError::new_err(format!(
|
||||
@@ -363,15 +371,19 @@ pub enum KnownCondition {
|
||||
},
|
||||
}
|
||||
|
||||
impl IntoPy<PyObject> for Condition {
|
||||
fn into_py(self, py: Python<'_>) -> PyObject {
|
||||
pythonize(py, &self).expect("valid condition")
|
||||
impl<'source> IntoPyObject<'source> for Condition {
|
||||
type Target = PyAny;
|
||||
type Output = Bound<'source, Self::Target>;
|
||||
type Error = PythonizeError;
|
||||
|
||||
fn into_pyobject(self, py: Python<'source>) -> Result<Self::Output, Self::Error> {
pythonize(py, &self)
}
}

impl<'source> FromPyObject<'source> for Condition {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
Ok(depythonize_bound(ob.clone())?)
Ok(depythonize(ob)?)
}
}

@@ -23,7 +23,6 @@ use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
use regex;
use regex::Regex;
use regex::RegexBuilder;

@@ -29,7 +29,7 @@ use pyo3::{
exceptions::PyValueError,
pyclass, pymethods,
types::{PyAnyMethods, PyModule, PyModuleMethods},
Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python,
};
use ulid::Ulid;

@@ -37,6 +37,7 @@ use self::session::Session;
use crate::{
errors::{NotFoundError, SynapseError},
http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
UnwrapInfallible,
};

mod session;
@@ -125,7 +126,11 @@ impl RendezvousHandler {
let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
.map_err(|_| PyValueError::new_err("Invalid base URI"))?;

let clock = homeserver.call_method0("get_clock")?.to_object(py);
let clock = homeserver
.call_method0("get_clock")?
.into_pyobject(py)
.unwrap_infallible()
.unbind();

// Construct a Python object so that we can get a reference to the
// evict method and schedule it to run.
@@ -288,6 +293,13 @@ impl RendezvousHandler {
let mut response = Response::new(Bytes::new());
*response.status_mut() = StatusCode::ACCEPTED;
prepare_headers(response.headers_mut(), session);

// Even though this isn't mandated by the MSC, we set a Content-Type on the response. It
// doesn't do any harm as the body is empty, but this helps escape a bug in some reverse
// proxy/cache setup which strips the ETag header if there is no Content-Type set.
// Specifically, we noticed this behaviour when placing Synapse behind Cloudflare.
response.headers_mut().typed_insert(ContentType::text());

http_response_to_twisted(twisted_request, response)?;

Ok(())
@@ -311,7 +323,7 @@ impl RendezvousHandler {
}

pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new_bound(py, "rendezvous")?;
let child_module = PyModule::new(py, "rendezvous")?;

child_module.add_class::<RendezvousHandler>()?;

@@ -319,7 +331,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>

// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import rendezvous` work.
py.import_bound("sys")?
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.rendezvous", child_module)?;

@@ -28,9 +28,8 @@ from typing import Collection, Optional, Sequence, Set
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (rolling distro, no EOL)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
"ubuntu:oracular", # 24.10 (EOL 2025-07)

@@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
if py_version < (3, 9):
print("Synapse requires Python 3.9 or above.")
sys.exit(1)
|
||||
|
||||
# Allow using the asyncio reactor via env var.
|
||||
|
||||
@@ -88,6 +88,7 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore
|
||||
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
|
||||
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.user_directory import (
|
||||
@@ -255,6 +256,7 @@ class Store(
|
||||
ReceiptsBackgroundUpdateStore,
|
||||
RelationsWorkerStore,
|
||||
EventFederationWorkerStore,
|
||||
SlidingSyncStore,
|
||||
):
|
||||
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
|
||||
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
|
||||
|
||||
@@ -365,11 +365,6 @@ class ExperimentalConfig(Config):
|
||||
# MSC3874: Filtering /messages with rel_types / not_rel_types.
|
||||
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
|
||||
|
||||
# MSC3886: Simple client rendezvous capability
|
||||
self.msc3886_endpoint: Optional[str] = experimental.get(
|
||||
"msc3886_endpoint", None
|
||||
)
|
||||
|
||||
# MSC3890: Remotely silence local notifications
|
||||
# Note: This option requires "experimental_features.msc3391_enabled" to be
|
||||
# set to "true", in order to communicate account data deletions to clients.
|
||||
@@ -450,3 +445,6 @@ class ExperimentalConfig(Config):
|
||||
|
||||
# MSC4210: Remove legacy mentions
|
||||
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
|
||||
|
||||
# MSC4222: Adding `state_after` to sync v2
|
||||
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
|
||||
|
||||
@@ -272,9 +272,7 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)

self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)
self.enable_authenticated_media = config.get("enable_authenticated_media", True)

def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
|
||||
|
||||
@@ -215,9 +215,6 @@ class HttpListenerConfig:
|
||||
additional_resources: Dict[str, dict] = attr.Factory(dict)
|
||||
tag: Optional[str] = None
|
||||
request_id_header: Optional[str] = None
|
||||
# If true, the listener will return CORS response headers compatible with MSC3886:
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/3886
|
||||
experimental_cors_msc3886: bool = False
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -1004,7 +1001,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
|
||||
additional_resources=listener.get("additional_resources", {}),
|
||||
tag=listener.get("tag"),
|
||||
request_id_header=listener.get("request_id_header"),
|
||||
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
|
||||
)
|
||||
|
||||
if socket_path:
|
||||
|
||||
@@ -140,7 +140,6 @@ from typing import (
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
@@ -170,7 +169,13 @@ from synapse.metrics.background_process_metrics import (
|
||||
run_as_background_process,
|
||||
wrap_as_background_process,
|
||||
)
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
|
||||
from synapse.types import (
|
||||
JsonDict,
|
||||
ReadReceipt,
|
||||
RoomStreamToken,
|
||||
StrCollection,
|
||||
get_domain_from_id,
|
||||
)
|
||||
from synapse.util import Clock
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.retryutils import filter_destinations_by_retry_limiter
|
||||
@@ -297,12 +302,10 @@ class _DestinationWakeupQueue:
# being woken up.
_MAX_TIME_IN_QUEUE = 30.0

# The maximum duration in seconds between waking up consecutive destination
# queues.
_MAX_DELAY = 0.1

sender: "FederationSender" = attr.ib()
clock: Clock = attr.ib()
max_delay_s: int = attr.ib()

queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
processing: bool = attr.ib(default=False)

@@ -332,7 +335,7 @@ class _DestinationWakeupQueue:
# We also add an upper bound to the delay, to gracefully handle the
# case where the queue only has a few entries in it.
current_sleep_seconds = min(
self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue)
)
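The interplay between the per-destination cap and the 30-second drain target is easier to see with concrete numbers. A minimal sketch of the calculation above, using an illustrative max_delay_s of 0.02s (i.e. one over a hypothetical federation_rr_transactions_per_room_per_second of 50); the queue sizes are made up:

# Sketch only: reproduces the min() above with illustrative values.
MAX_TIME_IN_QUEUE = 30.0  # aim to drain the whole queue within ~30s
max_delay_s = 0.02        # assumed 1 / federation_rr_transactions_per_room_per_second

for queue_len in (1, 10, 1_000, 100_000):
    # Spread wake-ups evenly over MAX_TIME_IN_QUEUE, but never wait longer
    # than max_delay_s between consecutive destinations (the upper bound the
    # comment above describes, which matters when the queue is nearly empty).
    sleep = min(max_delay_s, MAX_TIME_IN_QUEUE / queue_len)
    print(f"{queue_len:>7} queued destinations -> sleep {sleep:.4f}s between wake-ups")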

while self.queue:
@@ -416,19 +419,14 @@ class FederationSender(AbstractFederationSender):
self._is_processing = False
self._last_poked_id = -1

# map from room_id to a set of PerDestinationQueues which we believe are
# awaiting a call to flush_read_receipts_for_room. The presence of an entry
# here for a given room means that we are rate-limiting RR flushes to that room,
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}

self._rr_txn_interval_per_room_ms = (
1000.0
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)

self._external_cache = hs.get_external_cache()
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)

rr_txn_interval_per_room_s = (
1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._destination_wakeup_queue = _DestinationWakeupQueue(
self, self.clock, max_delay_s=rr_txn_interval_per_room_s
)

# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call_now(
@@ -745,37 +743,48 @@ class FederationSender(AbstractFederationSender):

# Some background on the rate-limiting going on here.
#
# It turns out that if we attempt to send out RRs as soon as we get them from
# a client, then we end up trying to do several hundred Hz of federation
# transactions. (The number of transactions scales as O(N^2) on the size of a
# room, since in a large room we have both more RRs coming in, and more servers
# to send them to.)
# It turns out that if we attempt to send out RRs as soon as we get them
# from a client, then we end up trying to do several hundred Hz of
# federation transactions. (The number of transactions scales as O(N^2)
# on the size of a room, since in a large room we have both more RRs
# coming in, and more servers to send them to.)
#
# This leads to a lot of CPU load, and we end up getting behind. The solution
# currently adopted is as follows:
# This leads to a lot of CPU load, and we end up getting behind. The
# solution currently adopted is to differentiate between receipts and
# destinations we should immediately send to, and those we can trickle
# the receipts to.
#
# The first receipt in a given room is sent out immediately, at time T0. Any
# further receipts are, in theory, batched up for N seconds, where N is calculated
# based on the number of servers in the room to achieve a transaction frequency
# of around 50Hz. So, for example, if there were 100 servers in the room, then
# N would be 100 / 50Hz = 2 seconds.
# The current logic is to send receipts out immediately if:
# - the room is "small", i.e. there's only N servers to send receipts
# to, and so sending out the receipts immediately doesn't cause too
# much load; or
# - the receipt is for an event that happened recently, as users
# notice if receipts are delayed when they know other users are
# currently reading the room; or
# - the receipt is being sent to the server that sent the event, so
# that users see receipts for their own receipts quickly.
#
# Then, after T+N, we flush out any receipts that have accumulated, and restart
# the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
# we stop the cycle and go back to the start.
# For destinations that we should delay sending the receipt to, we queue
# the receipts up to be sent in the next transaction, but don't trigger
# a new transaction to be sent. We then add the destination to the
# `DestinationWakeupQueue`, which will slowly iterate over each
# destination and trigger a new transaction to be sent.
#
# However, in practice, it is often possible to flush out receipts earlier: in
# particular, if we are sending a transaction to a given server anyway (for
# example, because we have a PDU or a RR in another room to send), then we may
# as well send out all of the pending RRs for that server. So it may be that
# by the time we get to T+N, we don't actually have any RRs left to send out.
# Nevertheless we continue to buffer up RRs for the room in question until we
# reach the point that no RRs arrive between timer ticks.
# However, in practice, it is often possible to send out delayed
# receipts earlier: in particular, if we are sending a transaction to a
# given server anyway (for example, because we have a PDU or a RR in
# another room to send), then we may as well send out all of the pending
# RRs for that server. So it may be that by the time we get to waking up
# the destination, we don't actually have any RRs left to send out.
#
# For even more background, see https://github.com/matrix-org/synapse/issues/4730.
# For even more background, see
# https://github.com/matrix-org/synapse/issues/4730.

room_id = receipt.room_id

# Local read receipts always have 1 event ID.
event_id = receipt.event_ids[0]

# Work out which remote servers should be poked and poke them.
domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
@@ -797,49 +806,51 @@ class FederationSender(AbstractFederationSender):
if not domains:
return

queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
# We now split which domains we want to wake up immediately vs which we
# want to delay waking up.
immediate_domains: StrCollection
delay_domains: StrCollection

# if there is no flush yet scheduled, we will send out these receipts with
# immediate flushes, and schedule the next flush for this room.
if queues_pending_flush is not None:
logger.debug("Queuing receipt for: %r", domains)
if len(domains) < 10:
# For "small" rooms send to all domains immediately
immediate_domains = domains
delay_domains = ()
else:
logger.debug("Sending receipt to: %r", domains)
self._schedule_rr_flush_for_room(room_id, len(domains))
metadata = await self.store.get_metadata_for_event(
receipt.room_id, event_id
)
assert metadata is not None

for domain in domains:
sender_domain = get_domain_from_id(metadata.sender)

if self.clock.time_msec() - metadata.received_ts < 60_000:
# We always send receipts for recent messages immediately
immediate_domains = domains
delay_domains = ()
else:
# Otherwise, we delay waking up all destinations except for the
# sender's domain.
immediate_domains = []
delay_domains = []
for domain in domains:
if domain == sender_domain:
immediate_domains.append(domain)
else:
delay_domains.append(domain)

for domain in immediate_domains:
# Add to destination queue and wake the destination up
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
queue.attempt_new_transaction()

for domain in delay_domains:
# Add to destination queue...
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)

# if there is already a RR flush pending for this room, then make sure this
# destination is registered for the flush
if queues_pending_flush is not None:
queues_pending_flush.add(queue)
else:
queue.flush_read_receipts_for_room(room_id)

def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
# that is going to cause approximately len(domains) transactions, so now back
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains

logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
self._queues_awaiting_rr_flush_by_room[room_id] = set()

def _flush_rrs_for_room(self, room_id: str) -> None:
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)

if not queues:
# no more RRs arrived for this room; we are done.
return

# schedule the next flush
self._schedule_rr_flush_for_room(room_id, len(queues))

for queue in queues:
queue.flush_read_receipts_for_room(room_id)
# ... and schedule the destination to be woken up.
self._destination_wakeup_queue.add_to_queue(domain)
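The new receipt path above boils down to a small decision rule. A minimal standalone sketch of that rule (the 10-domain and 60-second thresholds and the overall behaviour come from the hunk above; the function and its argument names are illustrative, not Synapse's actual API):

# Sketch only: distils the immediate-vs-delayed split described above.
from typing import Collection, Tuple

SMALL_ROOM_DOMAIN_LIMIT = 10      # threshold used in the hunk above
RECENT_EVENT_WINDOW_MS = 60_000   # threshold used in the hunk above

def split_receipt_domains(
    domains: Collection[str],
    sender_domain: str,
    event_age_ms: int,
) -> Tuple[Collection[str], Collection[str]]:
    """Return (immediate_domains, delay_domains)."""
    if len(domains) < SMALL_ROOM_DOMAIN_LIMIT:
        # Small room: waking every destination immediately is cheap.
        return domains, ()
    if event_age_ms < RECENT_EVENT_WINDOW_MS:
        # Receipt for a recent event: users notice delayed receipts here.
        return domains, ()
    # Otherwise only the event sender's server is woken immediately; the rest
    # are queued and trickled out via the DestinationWakeupQueue.
    immediate = [d for d in domains if d == sender_domain]
    delay = [d for d in domains if d != sender_domain]
    return immediate, delay

# Example: a large room and an hour-old event -> only the sender is immediate.
imm, dly = split_receipt_domains(
    ["matrix.org"] + [f"server{i}.example" for i in range(20)],
    sender_domain="matrix.org",
    event_age_ms=3_600_000,
)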
|
||||
|
||||
async def send_presence_to_destinations(
|
||||
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
|
||||
|
||||
@@ -156,7 +156,6 @@ class PerDestinationQueue:
|
||||
# Each receipt can only have a single receipt per
|
||||
# (room ID, receipt type, user ID, thread ID) tuple.
|
||||
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
|
||||
self._rrs_pending_flush = False
|
||||
|
||||
# stream_id of last successfully sent to-device message.
|
||||
# NB: may be a long or an int.
|
||||
@@ -258,15 +257,7 @@ class PerDestinationQueue:
|
||||
}
|
||||
)
|
||||
|
||||
def flush_read_receipts_for_room(self, room_id: str) -> None:
|
||||
# If there are any pending receipts for this room then force-flush them
|
||||
# in a new transaction.
|
||||
for edu in self._pending_receipt_edus:
|
||||
if room_id in edu:
|
||||
self._rrs_pending_flush = True
|
||||
self.attempt_new_transaction()
|
||||
# No use in checking remaining EDUs if the room was found.
|
||||
break
|
||||
self.mark_new_data()
|
||||
|
||||
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
|
||||
self._pending_edus_keyed[(edu.edu_type, key)] = edu
|
||||
@@ -603,12 +594,9 @@ class PerDestinationQueue:
|
||||
self._destination, last_successful_stream_ordering
|
||||
)
|
||||
|
||||
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
|
||||
def _get_receipt_edus(self, limit: int) -> Iterable[Edu]:
|
||||
if not self._pending_receipt_edus:
|
||||
return
|
||||
if not force_flush and not self._rrs_pending_flush:
|
||||
# not yet time for this lot
|
||||
return
|
||||
|
||||
# Send at most limit EDUs for receipts.
|
||||
for content in self._pending_receipt_edus[:limit]:
|
||||
@@ -747,7 +735,7 @@ class _TransactionQueueManager:
|
||||
)
|
||||
|
||||
# Add read receipt EDUs.
|
||||
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
|
||||
pending_edus.extend(self.queue._get_receipt_edus(limit=5))
|
||||
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
|
||||
|
||||
# Next, prioritize to-device messages so that existing encryption channels
|
||||
@@ -795,13 +783,6 @@ class _TransactionQueueManager:
|
||||
if not self._pdus and not pending_edus:
|
||||
return [], []
|
||||
|
||||
# if we've decided to send a transaction anyway, and we have room, we
|
||||
# may as well send any pending RRs
|
||||
if edu_limit:
|
||||
pending_edus.extend(
|
||||
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
|
||||
)
|
||||
|
||||
if self._pdus:
|
||||
self._last_stream_ordering = self._pdus[
|
||||
-1
|
||||
|
||||
@@ -124,6 +124,7 @@ class AdminHandler:
|
||||
"consent_ts": user_info.consent_ts,
|
||||
"user_type": user_info.user_type,
|
||||
"is_guest": user_info.is_guest,
|
||||
"suspended": user_info.suspended,
|
||||
}
|
||||
|
||||
if self._msc3866_enabled:
|
||||
|
||||
@@ -39,6 +39,8 @@ from synapse.replication.http.devices import ReplicationUploadKeysForUserRestSer
|
||||
from synapse.types import (
|
||||
JsonDict,
|
||||
JsonMapping,
|
||||
ScheduledTask,
|
||||
TaskStatus,
|
||||
UserID,
|
||||
get_domain_from_id,
|
||||
get_verify_key_from_cross_signing_key,
|
||||
@@ -70,6 +72,7 @@ class E2eKeysHandler:
|
||||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
self._task_scheduler = hs.get_task_scheduler()
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
@@ -116,6 +119,10 @@ class E2eKeysHandler:
|
||||
hs.config.experimental.msc3984_appservice_key_query
|
||||
)
|
||||
|
||||
self._task_scheduler.register_action(
|
||||
self._delete_old_one_time_keys_task, "delete_old_otks"
|
||||
)
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_devices(
|
||||
@@ -615,7 +622,7 @@ class E2eKeysHandler:
|
||||
3. Attempt to fetch fallback keys from the database.
|
||||
|
||||
Args:
|
||||
local_query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
|
||||
always_include_fallback_keys: True to always include fallback keys.
|
||||
|
||||
Returns:
|
||||
@@ -1574,6 +1581,45 @@ class E2eKeysHandler:
return True
return False

async def _delete_old_one_time_keys_task(
self, task: ScheduledTask
) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
"""Scheduler task to delete old one time keys.

Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
that it could still have old OTKs that the client has dropped. This task is scheduled exactly once
by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm.
"""
last_user = task.result.get("from_user", "") if task.result else ""
while True:
# We process users in batches of 100
users, rowcount = await self.store.delete_old_otks_for_next_user_batch(
last_user, 100
)
if len(users) == 0:
# We're done!
return TaskStatus.COMPLETE, None, None

logger.debug(
"Deleted %i old one-time-keys for users '%s'..'%s'",
rowcount,
users[0],
users[-1],
)
last_user = users[-1]

# Store our progress
await self._task_scheduler.update_task(
task.id, result={"from_user": last_user}
)

# Sleep a little before doing the next user.
#
# matrix.org has about 15M users in the e2e_one_time_keys_json table
# (comprising 20M devices). We want this to take about a week, so we need
# to do about one batch of 100 users every 4 seconds.
await self.clock.sleep(4)
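The "about a week" estimate in the comment above can be sanity-checked directly; the 15M-user figure is the one quoted in the comment, and the rest is arithmetic:

# Rough check of the pacing described above.
total_users = 15_000_000   # figure quoted in the comment above
batch_size = 100
sleep_per_batch_s = 4

batches = total_users / batch_size            # 150,000 batches
total_seconds = batches * sleep_per_batch_s   # 600,000 seconds
print(total_seconds / 86_400)                 # ~6.9 days, i.e. about a week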
|
||||
|
||||
|
||||
def _check_cross_signing_key(
|
||||
key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None
|
||||
|
||||
@@ -196,7 +196,9 @@ class MessageHandler:
AuthError (403) if the user doesn't have permission to view
members of this room.
"""
state_filter = state_filter or StateFilter.all()
if state_filter is None:
state_filter = StateFilter.all()
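The switch from `state_filter or StateFilter.all()` to an explicit None check looks cosmetic but is not: `or` substitutes the default for any falsey argument, not just a missing one. The diff does not spell out the motivation, but this is the classic pitfall the explicit check avoids. A minimal sketch with a stand-in class (illustrative only, not Synapse's StateFilter):

# Sketch only: why "x or default" is risky for falsey-but-valid values.
class FakeFilter:
    def __init__(self, types):
        self.types = types

    def __bool__(self) -> bool:
        # An empty filter is falsey, like an empty collection.
        return bool(self.types)

ALL = FakeFilter({"*"})
MATCH_NOTHING = FakeFilter(set())  # valid, deliberately matches nothing

def resolve_bad(state_filter=None):
    # Silently replaces the "match nothing" filter with "match everything".
    return state_filter or ALL

def resolve_good(state_filter=None):
    # Only substitutes the default when the caller passed nothing at all.
    if state_filter is None:
        state_filter = ALL
    return state_filter

assert resolve_bad(MATCH_NOTHING) is ALL              # surprising
assert resolve_good(MATCH_NOTHING) is MATCH_NOTHING   # intended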
|
||||
|
||||
user_id = requester.user.to_string()
|
||||
|
||||
if at_token:
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
from itertools import chain
|
||||
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple
|
||||
@@ -79,6 +80,15 @@ sync_processing_time = Histogram(
["initial"],
)

# Limit the number of state_keys we should remember sending down the connection for each
# (room_id, user_id). We don't want to store and pull out too much data in the database.
#
# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down
# too many redundant member state events (that the client already knows about) for a
# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members
# anyway and it takes a while to cycle through 100 members.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100
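A minimal sketch of how a cap like this is applied when merging the newly requested state keys with previously remembered ones, mirroring the trimming logic later in this diff (the constant comes from the hunk above; the helper and its names are illustrative, not the handler's actual code):

import itertools
from typing import AbstractSet, Set

MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100

def merge_with_cap(
    request_state_keys: AbstractSet[str],
    previous_state_keys: AbstractSet[str],
) -> Set[str]:
    """Keep everything just requested, then top up with remembered keys up to the cap."""
    merged = set(request_state_keys) | set(previous_state_keys)
    if len(merged) <= MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
        return merged

    # Over the cap: the requested keys always win, and any remaining budget is
    # filled with an arbitrary subset of the previous keys (a set has no useful
    # recency ordering to exploit).
    merged = set(request_state_keys)
    budget = MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - len(request_state_keys)
    if budget > 0:
        merged |= set(itertools.islice(previous_state_keys, budget))
    return merged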
|
||||
|
||||
|
||||
class SlidingSyncHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
@@ -873,6 +883,14 @@ class SlidingSyncHandler:
|
||||
#
|
||||
# Calculate the `StateFilter` based on the `required_state` for the room
|
||||
required_state_filter = StateFilter.none()
|
||||
# The requested `required_state_map` with the lazy membership expanded and
|
||||
# `$ME` replaced with the user's ID. This allows us to see what membership we've
|
||||
# sent down to the client in the next request.
|
||||
#
|
||||
# Make a copy so we can modify it. Still need to be careful to make a copy of
|
||||
# the state key sets if we want to add/remove from them. We could make a deep
|
||||
# copy but this saves us some work.
|
||||
expanded_required_state_map = dict(room_sync_config.required_state_map)
|
||||
if room_membership_for_user_at_to_token.membership not in (
|
||||
Membership.INVITE,
|
||||
Membership.KNOCK,
|
||||
@@ -938,21 +956,48 @@ class SlidingSyncHandler:
|
||||
):
|
||||
lazy_load_room_members = True
|
||||
# Everyone in the timeline is relevant
|
||||
#
|
||||
# FIXME: We probably also care about invite, ban, kick, targets, etc
|
||||
# but the spec only mentions "senders".
|
||||
timeline_membership: Set[str] = set()
|
||||
if timeline_events is not None:
|
||||
for timeline_event in timeline_events:
|
||||
timeline_membership.add(timeline_event.sender)
|
||||
|
||||
# Update the required state filter so we pick up the new
|
||||
# membership
|
||||
for user_id in timeline_membership:
|
||||
required_state_types.append(
|
||||
(EventTypes.Member, user_id)
|
||||
)
|
||||
|
||||
# FIXME: We probably also care about invite, ban, kick, targets, etc
|
||||
# but the spec only mentions "senders".
|
||||
# Add an explicit entry for each user in the timeline
|
||||
#
|
||||
# Make a new set or copy of the state key set so we can
|
||||
# modify it without affecting the original
|
||||
# `required_state_map`
|
||||
expanded_required_state_map[EventTypes.Member] = (
|
||||
expanded_required_state_map.get(
|
||||
EventTypes.Member, set()
|
||||
)
|
||||
| timeline_membership
|
||||
)
|
||||
elif state_key == StateValues.ME:
|
||||
num_others += 1
|
||||
required_state_types.append((state_type, user.to_string()))
|
||||
# Replace `$ME` with the user's ID so we can deduplicate
|
||||
# when someone requests the same state with `$ME` or with
|
||||
# their user ID.
|
||||
#
|
||||
# Make a new set or copy of the state key set so we can
|
||||
# modify it without affecting the original
|
||||
# `required_state_map`
|
||||
expanded_required_state_map[EventTypes.Member] = (
|
||||
expanded_required_state_map.get(
|
||||
EventTypes.Member, set()
|
||||
)
|
||||
| {user.to_string()}
|
||||
)
|
||||
else:
|
||||
num_others += 1
|
||||
required_state_types.append((state_type, state_key))
|
||||
@@ -1016,8 +1061,8 @@ class SlidingSyncHandler:
|
||||
changed_required_state_map, added_state_filter = (
|
||||
_required_state_changes(
|
||||
user.to_string(),
|
||||
previous_room_config=prev_room_sync_config,
|
||||
room_sync_config=room_sync_config,
|
||||
prev_required_state_map=prev_room_sync_config.required_state_map,
|
||||
request_required_state_map=expanded_required_state_map,
|
||||
state_deltas=room_state_delta_id_map,
|
||||
)
|
||||
)
|
||||
@@ -1131,7 +1176,9 @@ class SlidingSyncHandler:
|
||||
# sensible order again.
|
||||
bump_stamp = 0
|
||||
|
||||
room_sync_required_state_map_to_persist = room_sync_config.required_state_map
|
||||
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
|
||||
expanded_required_state_map
|
||||
)
|
||||
if changed_required_state_map:
|
||||
room_sync_required_state_map_to_persist = changed_required_state_map
|
||||
|
||||
@@ -1185,7 +1232,10 @@ class SlidingSyncHandler:
|
||||
)
|
||||
|
||||
else:
|
||||
new_connection_state.room_configs[room_id] = room_sync_config
|
||||
new_connection_state.room_configs[room_id] = RoomSyncConfig(
|
||||
timeline_limit=room_sync_config.timeline_limit,
|
||||
required_state_map=room_sync_required_state_map_to_persist,
|
||||
)
|
||||
|
||||
set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
|
||||
|
||||
@@ -1320,8 +1370,8 @@ class SlidingSyncHandler:
|
||||
def _required_state_changes(
|
||||
user_id: str,
|
||||
*,
|
||||
previous_room_config: "RoomSyncConfig",
|
||||
room_sync_config: RoomSyncConfig,
|
||||
prev_required_state_map: Mapping[str, AbstractSet[str]],
|
||||
request_required_state_map: Mapping[str, AbstractSet[str]],
|
||||
state_deltas: StateMap[str],
|
||||
) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
|
||||
"""Calculates the changes between the required state room config from the
|
||||
@@ -1342,10 +1392,6 @@ def _required_state_changes(
|
||||
and the state filter to use to fetch extra current state that we need to
|
||||
return.
|
||||
"""
|
||||
|
||||
prev_required_state_map = previous_room_config.required_state_map
|
||||
request_required_state_map = room_sync_config.required_state_map
|
||||
|
||||
if prev_required_state_map == request_required_state_map:
|
||||
# There has been no change. Return immediately.
|
||||
return None, StateFilter.none()
|
||||
@@ -1378,12 +1424,19 @@ def _required_state_changes(
|
||||
# client. Passed to `StateFilter.from_types(...)`
|
||||
added: List[Tuple[str, Optional[str]]] = []
|
||||
|
||||
# Convert the list of state deltas to map from type to state_keys that have
|
||||
# changed.
|
||||
changed_types_to_state_keys: Dict[str, Set[str]] = {}
|
||||
for event_type, state_key in state_deltas:
|
||||
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
|
||||
|
||||
# First we calculate what, if anything, has been *added*.
|
||||
for event_type in (
|
||||
prev_required_state_map.keys() | request_required_state_map.keys()
|
||||
):
|
||||
old_state_keys = prev_required_state_map.get(event_type, set())
|
||||
request_state_keys = request_required_state_map.get(event_type, set())
|
||||
changed_state_keys = changed_types_to_state_keys.get(event_type, set())
|
||||
|
||||
if old_state_keys == request_state_keys:
|
||||
# No change to this type
|
||||
@@ -1393,8 +1446,55 @@ def _required_state_changes(
|
||||
# Nothing *added*, so we skip. Removals happen below.
|
||||
continue
|
||||
|
||||
# Always update changes to include the newly added keys
|
||||
changes[event_type] = request_state_keys
|
||||
# We only remove state keys from the effective state if they've been
|
||||
# removed from the request *and* the state has changed. This ensures
|
||||
# that if a client removes and then re-adds a state key, we only send
|
||||
# down the associated current state event if its changed (rather than
|
||||
# sending down the same event twice).
|
||||
invalidated_state_keys = (
|
||||
old_state_keys - request_state_keys
|
||||
) & changed_state_keys
|
||||
|
||||
# Figure out which state keys we should remember sending down the connection
|
||||
inheritable_previous_state_keys = (
|
||||
# Retain the previous state_keys that we've sent down before.
|
||||
# Wildcard and lazy state keys are not sticky from previous requests.
|
||||
(old_state_keys - {StateValues.WILDCARD, StateValues.LAZY})
|
||||
- invalidated_state_keys
|
||||
)
|
||||
|
||||
# Always update changes to include the newly added keys (we've expanded the set
|
||||
# of state keys), use the new requested set with whatever hasn't been
|
||||
# invalidated from the previous set.
|
||||
changes[event_type] = request_state_keys | inheritable_previous_state_keys
|
||||
# Limit the number of state_keys we should remember sending down the connection
|
||||
# for each (room_id, user_id). We don't want to store and pull out too much data
|
||||
# in the database. This is a happy-medium between remembering nothing and
|
||||
# everything. We can avoid sending redundant state down the connection most of
|
||||
# the time given that most rooms don't have 100 members anyway and it takes a
|
||||
# while to cycle through 100 members.
|
||||
#
|
||||
# Only remember up to (MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER)
|
||||
if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
|
||||
# Reset back to only the requested state keys
|
||||
changes[event_type] = request_state_keys
|
||||
|
||||
# Skip if there isn't any room to fill in the rest with previous state keys
|
||||
if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
|
||||
# Fill the rest with previous state_keys. Ideally, we could sort
|
||||
# these by recency but it's just a set so just pick an arbitrary
|
||||
# subset (good enough).
|
||||
changes[event_type] = changes[event_type] | set(
|
||||
itertools.islice(
|
||||
inheritable_previous_state_keys,
|
||||
# Just taking the difference isn't perfect as there could be
|
||||
# overlap in the keys between the requested and previous but we
|
||||
# will decide to just take the easy route for now and avoid
|
||||
# additional set operations to figure it out.
|
||||
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
|
||||
- len(request_state_keys),
|
||||
)
|
||||
)
|
||||
|
||||
if StateValues.WILDCARD in old_state_keys:
|
||||
# We were previously fetching everything for this type, so we don't need to
|
||||
@@ -1421,12 +1521,6 @@ def _required_state_changes(
|
||||
|
||||
added_state_filter = StateFilter.from_types(added)
|
||||
|
||||
# Convert the list of state deltas to map from type to state_keys that have
|
||||
# changed.
|
||||
changed_types_to_state_keys: Dict[str, Set[str]] = {}
|
||||
for event_type, state_key in state_deltas:
|
||||
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
|
||||
|
||||
# Figure out what changes we need to apply to the effective required state
|
||||
# config.
|
||||
for event_type, changed_state_keys in changed_types_to_state_keys.items():
|
||||
@@ -1437,15 +1531,23 @@ def _required_state_changes(
|
||||
# No change.
|
||||
continue
|
||||
|
||||
# If we see the `user_id` as a state_key, also add "$ME" to the list of state
|
||||
# that has changed to account for people requesting `required_state` with `$ME`
|
||||
# or their user ID.
|
||||
if user_id in changed_state_keys:
|
||||
changed_state_keys.add(StateValues.ME)
|
||||
|
||||
# We only remove state keys from the effective state if they've been
|
||||
# removed from the request *and* the state has changed. This ensures
|
||||
# that if a client removes and then re-adds a state key, we only send
|
||||
# down the associated current state event if its changed (rather than
|
||||
# sending down the same event twice).
|
||||
invalidated_state_keys = (
|
||||
old_state_keys - request_state_keys
|
||||
) & changed_state_keys
|
||||
|
||||
# We've expanded the set of state keys, ... (already handled above)
|
||||
if request_state_keys - old_state_keys:
|
||||
# We've expanded the set of state keys, so we just clobber the
|
||||
# current set with the new set.
|
||||
#
|
||||
# We could also ensure that we keep entries where the state hasn't
|
||||
# changed, but are no longer in the requested required state, but
|
||||
# that's a sufficient edge case that we can ignore (as its only a
|
||||
# performance optimization).
|
||||
changes[event_type] = request_state_keys
|
||||
continue
|
||||
|
||||
old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
|
||||
@@ -1467,11 +1569,6 @@ def _required_state_changes(
|
||||
changes[event_type] = request_state_keys
|
||||
continue
|
||||
|
||||
# Handle "$ME" values by adding "$ME" if the state key matches the user
|
||||
# ID.
|
||||
if user_id in changed_state_keys:
|
||||
changed_state_keys.add(StateValues.ME)
|
||||
|
||||
# At this point there are no wildcards and no additions to the set of
|
||||
# state keys requested, only deletions.
|
||||
#
|
||||
@@ -1480,9 +1577,8 @@ def _required_state_changes(
|
||||
# that if a client removes and then re-adds a state key, we only send
|
||||
# down the associated current state event if its changed (rather than
|
||||
# sending down the same event twice).
|
||||
invalidated = (old_state_keys - request_state_keys) & changed_state_keys
|
||||
if invalidated:
|
||||
changes[event_type] = old_state_keys - invalidated
|
||||
if invalidated_state_keys:
|
||||
changes[event_type] = old_state_keys - invalidated_state_keys
|
||||
|
||||
if changes:
|
||||
# Update the required state config based on the changes.
|
||||
|
||||
@@ -143,6 +143,7 @@ class SyncConfig:
|
||||
filter_collection: FilterCollection
|
||||
is_guest: bool
|
||||
device_id: Optional[str]
|
||||
use_state_after: bool
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
@@ -1141,6 +1142,7 @@ class SyncHandler:
|
||||
since_token: Optional[StreamToken],
|
||||
end_token: StreamToken,
|
||||
full_state: bool,
|
||||
joined: bool,
|
||||
) -> MutableStateMap[EventBase]:
|
||||
"""Works out the difference in state between the end of the previous sync and
|
||||
the start of the timeline.
|
||||
@@ -1155,6 +1157,7 @@ class SyncHandler:
|
||||
the point just after their leave event.
|
||||
full_state: Whether to force returning the full state.
|
||||
`lazy_load_members` still applies when `full_state` is `True`.
|
||||
joined: whether the user is currently joined to the room
|
||||
|
||||
Returns:
|
||||
The state to return in the sync response for the room.
|
||||
@@ -1230,11 +1233,12 @@ class SyncHandler:
|
||||
if full_state:
|
||||
state_ids = await self._compute_state_delta_for_full_sync(
|
||||
room_id,
|
||||
sync_config.user,
|
||||
sync_config,
|
||||
batch,
|
||||
end_token,
|
||||
members_to_fetch,
|
||||
timeline_state,
|
||||
joined,
|
||||
)
|
||||
else:
|
||||
# If this is an initial sync then full_state should be set, and
|
||||
@@ -1244,6 +1248,7 @@ class SyncHandler:
|
||||
|
||||
state_ids = await self._compute_state_delta_for_incremental_sync(
|
||||
room_id,
|
||||
sync_config,
|
||||
batch,
|
||||
since_token,
|
||||
end_token,
|
||||
@@ -1316,20 +1321,24 @@ class SyncHandler:
|
||||
async def _compute_state_delta_for_full_sync(
|
||||
self,
|
||||
room_id: str,
|
||||
syncing_user: UserID,
|
||||
sync_config: SyncConfig,
|
||||
batch: TimelineBatch,
|
||||
end_token: StreamToken,
|
||||
members_to_fetch: Optional[Set[str]],
|
||||
timeline_state: StateMap[str],
|
||||
joined: bool,
|
||||
) -> StateMap[str]:
|
||||
"""Calculate the state events to be included in a full sync response.
|
||||
|
||||
As with `_compute_state_delta_for_incremental_sync`, the result will include
|
||||
the membership events for the senders of each event in `members_to_fetch`.
|
||||
|
||||
Note that whether this returns the state at the start or the end of the
|
||||
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
|
||||
|
||||
Args:
|
||||
room_id: The room we are calculating for.
|
||||
syncing_user: The user that is calling `/sync`.
|
||||
sync_confg: The user that is calling `/sync`.
|
||||
batch: The timeline batch for the room that will be sent to the user.
|
||||
end_token: Token of the end of the current batch. Normally this will be
|
||||
the same as the global "now_token", but if the user has left the room,
|
||||
@@ -1338,10 +1347,11 @@ class SyncHandler:
|
||||
events in the timeline.
|
||||
timeline_state: The contribution to the room state from state events in
|
||||
`batch`. Only contains the last event for any given state key.
|
||||
joined: whether the user is currently joined to the room
|
||||
|
||||
Returns:
|
||||
A map from (type, state_key) to event_id, for each event that we believe
|
||||
should be included in the `state` part of the sync response.
|
||||
should be included in the `state` or `state_after` part of the sync response.
|
||||
"""
|
||||
if members_to_fetch is not None:
|
||||
# Lazy-loading of membership events is enabled.
|
||||
@@ -1359,7 +1369,7 @@ class SyncHandler:
|
||||
# is no guarantee that our membership will be in the auth events of
|
||||
# timeline events when the room is partial stated.
|
||||
state_filter = StateFilter.from_lazy_load_member_list(
|
||||
members_to_fetch.union((syncing_user.to_string(),))
|
||||
members_to_fetch.union((sync_config.user.to_string(),))
|
||||
)
|
||||
|
||||
# We are happy to use partial state to compute the `/sync` response.
|
||||
@@ -1373,6 +1383,61 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False

# Check if we are wanting to return the state at the start or end of the
# timeline. If at the end we can just use the current state.
if sync_config.use_state_after:
# If we're getting the state at the end of the timeline, we can just
# use the current state of the room (and roll back any changes
# between when we fetched the current state and `end_token`).
#
# For rooms we're not joined to, there might be a very large number
# of deltas between `end_token` and "now", and so instead we fetch
# the state at the end of the timeline.
if joined:
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id,
state_filter=state_filter,
await_full_state=await_full_state,
)

# Now roll back the state by looking at the state deltas between
# end_token and now.
deltas = await self.store.get_current_state_deltas_for_room(
room_id,
from_token=end_token.room_key,
to_token=self.store.get_room_max_token(),
)
if deltas:
mutable_state_ids = dict(state_ids)

# We iterate over the deltas backwards so that if there are
# multiple changes of the same type/state_key we'll
# correctly pick the earliest delta.
for delta in reversed(deltas):
if delta.prev_event_id:
mutable_state_ids[(delta.event_type, delta.state_key)] = (
delta.prev_event_id
)
elif (delta.event_type, delta.state_key) in mutable_state_ids:
mutable_state_ids.pop((delta.event_type, delta.state_key))

state_ids = mutable_state_ids

return state_ids

else:
# Just use state groups to get the state at the end of the
# timeline, i.e. the state at the leave/etc event.
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
return state_at_timeline_end
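The "roll back by iterating deltas in reverse" step above is subtle: going backwards means that for a key that changed several times, the value taken from the earliest delta's prev_event_id wins, which is exactly the state as it stood at end_token. A small self-contained sketch of the same transformation (the delta shape is simplified; the field names mirror the ones used above):

# Sketch only: rolling a current-state map back past a list of deltas.
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

@dataclass
class Delta:
    event_type: str
    state_key: str
    event_id: Optional[str]       # state after the delta
    prev_event_id: Optional[str]  # state before the delta (None if newly added)

def roll_back(
    state_ids: Dict[Tuple[str, str], str], deltas: List[Delta]
) -> Dict[Tuple[str, str], str]:
    rolled_back = dict(state_ids)
    # Reverse order: if the same key changed twice, we end up with the value
    # from before the earliest change, i.e. the state at end_token.
    for delta in reversed(deltas):
        key = (delta.event_type, delta.state_key)
        if delta.prev_event_id:
            rolled_back[key] = delta.prev_event_id
        elif key in rolled_back:
            # The entry did not exist before the delta, so drop it.
            rolled_back.pop(key)
    return rolled_back

# The topic changed twice after end_token; rolling back recovers the original.
current = {("m.room.topic", ""): "$C"}
deltas = [
    Delta("m.room.topic", "", "$B", "$A"),  # older delta
    Delta("m.room.topic", "", "$C", "$B"),  # newer delta
]
assert roll_back(current, deltas) == {("m.room.topic", ""): "$A"}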
|
||||
|
||||
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
|
||||
room_id,
|
||||
stream_position=end_token,
|
||||
@@ -1405,6 +1470,7 @@ class SyncHandler:
|
||||
async def _compute_state_delta_for_incremental_sync(
|
||||
self,
|
||||
room_id: str,
|
||||
sync_config: SyncConfig,
|
||||
batch: TimelineBatch,
|
||||
since_token: StreamToken,
|
||||
end_token: StreamToken,
|
||||
@@ -1419,8 +1485,12 @@ class SyncHandler:
|
||||
(`compute_state_delta`) is responsible for keeping track of which membership
|
||||
events we have already sent to the client, and hence ripping them out.
|
||||
|
||||
Note that whether this returns the state at the start or the end of the
|
||||
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
|
||||
|
||||
Args:
|
||||
room_id: The room we are calculating for.
|
||||
sync_config
|
||||
batch: The timeline batch for the room that will be sent to the user.
|
||||
since_token: Token of the end of the previous batch.
|
||||
end_token: Token of the end of the current batch. Normally this will be
|
||||
@@ -1433,7 +1503,7 @@ class SyncHandler:
|
||||
|
||||
Returns:
|
||||
A map from (type, state_key) to event_id, for each event that we believe
|
||||
should be included in the `state` part of the sync response.
|
||||
should be included in the `state` or `state_after` part of the sync response.
|
||||
"""
|
||||
if members_to_fetch is not None:
|
||||
# Lazy-loading is enabled. Only return the state that is needed.
|
||||
@@ -1445,6 +1515,51 @@ class SyncHandler:
|
||||
await_full_state = True
|
||||
lazy_load_members = False
|
||||
|
||||
# Check if we are wanting to return the state at the start or end of the
|
||||
# timeline. If at the end we can just use the current state delta stream.
|
||||
if sync_config.use_state_after:
|
||||
delta_state_ids: MutableStateMap[str] = {}
|
||||
|
||||
if members_to_fetch:
|
||||
# We're lazy-loading, so the client might need some more member
|
||||
# events to understand the events in this timeline. So we always
|
||||
# fish out all the member events corresponding to the timeline
|
||||
# here. The caller will then dedupe any redundant ones.
|
||||
member_ids = await self._state_storage_controller.get_current_state_ids(
|
||||
room_id=room_id,
|
||||
state_filter=StateFilter.from_types(
|
||||
(EventTypes.Member, member) for member in members_to_fetch
|
||||
),
|
||||
await_full_state=await_full_state,
|
||||
)
|
||||
delta_state_ids.update(member_ids)
|
||||
|
||||
# We don't do LL filtering for incremental syncs - see
|
||||
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
|
||||
# N.B. this slows down incr syncs as we are now processing way more
|
||||
# state in the server than if we were LLing.
|
||||
#
|
||||
# i.e. we return all state deltas, including membership changes that
|
||||
# we'd normally exclude due to LL.
|
||||
deltas = await self.store.get_current_state_deltas_for_room(
|
||||
room_id=room_id,
|
||||
from_token=since_token.room_key,
|
||||
to_token=end_token.room_key,
|
||||
)
|
||||
for delta in deltas:
|
||||
if delta.event_id is None:
|
||||
# There was a state reset and this state entry is no longer
|
||||
# present, but we have no way of informing the client about
|
||||
# this, so we just skip it for now.
|
||||
continue
|
||||
|
||||
# Note that deltas are in stream ordering, so if there are
|
||||
# multiple deltas for a given type/state_key we'll always pick
|
||||
# the latest one.
|
||||
delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
|
||||
|
||||
return delta_state_ids
|
||||
|
||||
# For a non-gappy sync if the events in the timeline are simply a linear
|
||||
# chain (i.e. no merging/branching of the graph), then we know the state
|
||||
# delta between the end of the previous sync and start of the new one is
|
||||
@@ -2867,6 +2982,7 @@ class SyncHandler:
|
||||
since_token,
|
||||
room_builder.end_token,
|
||||
full_state=full_state,
|
||||
joined=room_builder.rtype == "joined",
|
||||
)
|
||||
else:
|
||||
# An out of band room won't have any state changes.
|
||||
|
||||
@@ -36,7 +36,6 @@ from typing import (
|
||||
)
|
||||
|
||||
import attr
|
||||
import multipart
|
||||
import treq
|
||||
from canonicaljson import encode_canonical_json
|
||||
from netaddr import AddrFormatError, IPAddress, IPSet
|
||||
@@ -93,6 +92,20 @@ from synapse.util.async_helpers import timeout_deferred
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
# Support both import names for the `python-multipart` (PyPI) library,
|
||||
# which renamed its package name from `multipart` to `python_multipart`
|
||||
# in 0.0.13 (though supports the old import name for compatibility).
|
||||
# Note that the `multipart` package name conflicts with `multipart` (PyPI)
|
||||
# so we should prefer importing from `python_multipart` when possible.
|
||||
try:
|
||||
from python_multipart import MultipartParser
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from python_multipart import multipart
|
||||
except ImportError:
|
||||
from multipart import MultipartParser # type: ignore[no-redef]
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
|
||||
@@ -1039,7 +1052,7 @@ class _MultipartParserProtocol(protocol.Protocol):
|
||||
self.deferred = deferred
|
||||
self.boundary = boundary
|
||||
self.max_length = max_length
|
||||
self.parser: Optional[multipart.MultipartParser] = None
|
||||
self.parser: Optional[MultipartParser] = None
|
||||
self.multipart_response = MultipartResponse()
|
||||
self.has_redirect = False
|
||||
self.in_json = False
|
||||
@@ -1097,12 +1110,12 @@ class _MultipartParserProtocol(protocol.Protocol):
|
||||
self.deferred.errback()
|
||||
self.file_length += end - start
|
||||
|
||||
callbacks: "multipart.multipart.MultipartCallbacks" = {
|
||||
callbacks: "multipart.MultipartCallbacks" = {
|
||||
"on_header_field": on_header_field,
|
||||
"on_header_value": on_header_value,
|
||||
"on_part_data": on_part_data,
|
||||
}
|
||||
self.parser = multipart.MultipartParser(self.boundary, callbacks)
|
||||
self.parser = MultipartParser(self.boundary, callbacks)
|
||||
|
||||
self.total_length += len(incoming_data)
|
||||
if self.max_length is not None and self.total_length >= self.max_length:
|
||||
|
||||
@@ -921,15 +921,6 @@ def set_cors_headers(request: "SynapseRequest") -> None:
|
||||
b"Access-Control-Expose-Headers",
|
||||
b"Synapse-Trace-Id, Server, ETag",
|
||||
)
|
||||
elif request.experimental_cors_msc3886:
|
||||
request.setHeader(
|
||||
b"Access-Control-Allow-Headers",
|
||||
b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
|
||||
)
|
||||
request.setHeader(
|
||||
b"Access-Control-Expose-Headers",
|
||||
b"ETag, Location, X-Max-Bytes",
|
||||
)
|
||||
else:
|
||||
request.setHeader(
|
||||
b"Access-Control-Allow-Headers",
|
||||
|
||||
@@ -94,7 +94,6 @@ class SynapseRequest(Request):
|
||||
self.reactor = site.reactor
|
||||
self._channel = channel # this is used by the tests
|
||||
self.start_time = 0.0
|
||||
self.experimental_cors_msc3886 = site.experimental_cors_msc3886
|
||||
|
||||
# The requester, if authenticated. For federation requests this is the
|
||||
# server name, for client requests this is the Requester object.
|
||||
@@ -666,10 +665,6 @@ class SynapseSite(ProxySite):
|
||||
|
||||
request_id_header = config.http_options.request_id_header
|
||||
|
||||
self.experimental_cors_msc3886: bool = (
|
||||
config.http_options.experimental_cors_msc3886
|
||||
)
|
||||
|
||||
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
|
||||
return request_class(
|
||||
channel,
|
||||
|
||||
@@ -39,7 +39,7 @@ from twisted.internet.endpoints import (
|
||||
)
|
||||
from twisted.internet.interfaces import (
|
||||
IPushProducer,
|
||||
IReactorTCP,
|
||||
IReactorTime,
|
||||
IStreamClientEndpoint,
|
||||
)
|
||||
from twisted.internet.protocol import Factory, Protocol
|
||||
@@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler):
|
||||
port: int,
|
||||
maximum_buffer: int = 1000,
|
||||
level: int = logging.NOTSET,
|
||||
_reactor: Optional[IReactorTCP] = None,
|
||||
_reactor: Optional[IReactorTime] = None,
|
||||
):
|
||||
super().__init__(level=level)
|
||||
self.host = host
|
||||
|
||||
@@ -259,7 +259,7 @@ class MediaRepository:
"""
media = await self.store.get_local_media(media_id)
if media is None:
raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND)
raise NotFoundError("Unknown media ID")
if media.user_id != auth_user.to_string():
raise SynapseError(

@@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum):

MSC3881 = "msc3881"
MSC3575 = "msc3575"
MSC4222 = "msc4222"

def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
if self is ExperimentalFeature.MSC3881:
return config.experimental.msc3881_enabled
if self is ExperimentalFeature.MSC3575:
return config.experimental.msc3575_enabled
if self is ExperimentalFeature.MSC4222:
return config.experimental.msc4222_enabled

assert_never(self)
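The assert_never(self) at the end of the chain gives an exhaustiveness check: if another member is added to the enum without a corresponding branch, a type checker flags the call, and it raises if ever reached at runtime. A minimal standalone sketch of the pattern (illustrative enum, not the Synapse one):

# Sketch only: exhaustiveness checking with assert_never.
from enum import Enum
from typing_extensions import assert_never

class Colour(str, Enum):
    RED = "red"
    BLUE = "blue"

def describe(colour: Colour) -> str:
    if colour is Colour.RED:
        return "warm"
    if colour is Colour.BLUE:
        return "cool"
    # The type checker narrows `colour` to Never here; adding Colour.GREEN
    # without a branch above turns this call into a type error.
    assert_never(colour)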
|
||||
|
||||
|
||||
@@ -34,51 +34,6 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# n.b [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) has now been closed.
|
||||
# However, we want to keep this implementation around for some time.
|
||||
# TODO: define an end-of-life date for this implementation.
|
||||
class MSC3886RendezvousServlet(RestServlet):
|
||||
"""
|
||||
This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
|
||||
simple client rendezvous capability that is used by the "Sign in with QR" functionality.
|
||||
|
||||
This implementation only serves as a 307 redirect to a configured server rather than being a full implementation.
|
||||
|
||||
A module that implements the full functionality is available at: https://pypi.org/project/matrix-http-rendezvous-synapse/.
|
||||
|
||||
Request:
|
||||
|
||||
POST /rendezvous HTTP/1.1
|
||||
Content-Type: ...
|
||||
|
||||
...
|
||||
|
||||
Response:
|
||||
|
||||
HTTP/1.1 307
|
||||
Location: <configured endpoint>
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns(
|
||||
"/org.matrix.msc3886/rendezvous$", releases=[], v1=False, unstable=True
|
||||
)
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__()
|
||||
redirection_target: Optional[str] = hs.config.experimental.msc3886_endpoint
|
||||
assert (
|
||||
redirection_target is not None
|
||||
), "Servlet is only registered if there is a redirection target"
|
||||
self.endpoint = redirection_target.encode("utf-8")
|
||||
|
||||
async def on_POST(self, request: SynapseRequest) -> None:
|
||||
respond_with_redirect(
|
||||
request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True
|
||||
)
|
||||
|
||||
# PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target.
|
||||
|
||||
|
||||
class MSC4108DelegationRendezvousServlet(RestServlet):
|
||||
PATTERNS = client_patterns(
|
||||
"/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True
|
||||
@@ -114,9 +69,6 @@ class MSC4108RendezvousServlet(RestServlet):
|
||||
|
||||
|
||||
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
if hs.config.experimental.msc3886_endpoint is not None:
|
||||
MSC3886RendezvousServlet(hs).register(http_server)
|
||||
|
||||
if hs.config.experimental.msc4108_enabled:
|
||||
MSC4108RendezvousServlet(hs).register(http_server)
|
||||
|
||||
|
||||
@@ -152,6 +152,14 @@ class SyncRestServlet(RestServlet):
|
||||
filter_id = parse_string(request, "filter")
|
||||
full_state = parse_boolean(request, "full_state", default=False)
|
||||
|
||||
use_state_after = False
|
||||
if await self.store.is_feature_enabled(
|
||||
user.to_string(), ExperimentalFeature.MSC4222
|
||||
):
|
||||
use_state_after = parse_boolean(
|
||||
request, "org.matrix.msc4222.use_state_after", default=False
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"/sync: user=%r, timeout=%r, since=%r, "
|
||||
"set_presence=%r, filter_id=%r, device_id=%r",
|
||||
@@ -184,6 +192,7 @@ class SyncRestServlet(RestServlet):
            full_state,
            device_id,
            last_ignore_accdata_streampos,
            use_state_after,
        )

        if filter_id is None:

@@ -220,6 +229,7 @@ class SyncRestServlet(RestServlet):
            filter_collection=filter_collection,
            is_guest=requester.is_guest,
            device_id=device_id,
            use_state_after=use_state_after,
        )

        since_token = None

@@ -258,7 +268,7 @@ class SyncRestServlet(RestServlet):
        # We know that the the requester has an access token since appservices
        # cannot use sync.
        response_content = await self.encode_response(
            time_now, sync_result, requester, filter_collection
            time_now, sync_config, sync_result, requester, filter_collection
        )

        logger.debug("Event formatting complete")

@@ -268,6 +278,7 @@ class SyncRestServlet(RestServlet):
    async def encode_response(
        self,
        time_now: int,
        sync_config: SyncConfig,
        sync_result: SyncResult,
        requester: Requester,
        filter: FilterCollection,

@@ -292,7 +303,7 @@ class SyncRestServlet(RestServlet):
        )

        joined = await self.encode_joined(
            sync_result.joined, time_now, serialize_options
            sync_config, sync_result.joined, time_now, serialize_options
        )

        invited = await self.encode_invited(

@@ -304,7 +315,7 @@ class SyncRestServlet(RestServlet):
        )

        archived = await self.encode_archived(
            sync_result.archived, time_now, serialize_options
            sync_config, sync_result.archived, time_now, serialize_options
        )

        logger.debug("building sync response dict")

@@ -372,6 +383,7 @@ class SyncRestServlet(RestServlet):
    @trace_with_opname("sync.encode_joined")
    async def encode_joined(
        self,
        sync_config: SyncConfig,
        rooms: List[JoinedSyncResult],
        time_now: int,
        serialize_options: SerializeEventConfig,

@@ -380,6 +392,7 @@ class SyncRestServlet(RestServlet):
        Encode the joined rooms in a sync result

        Args:
            sync_config
            rooms: list of sync results for rooms this user is joined to
            time_now: current time - used as a baseline for age calculations
            serialize_options: Event serializer options

@@ -389,7 +402,11 @@ class SyncRestServlet(RestServlet):
        joined = {}
        for room in rooms:
            joined[room.room_id] = await self.encode_room(
                room, time_now, joined=True, serialize_options=serialize_options
                sync_config,
                room,
                time_now,
                joined=True,
                serialize_options=serialize_options,
            )

        return joined

@@ -477,6 +494,7 @@ class SyncRestServlet(RestServlet):
    @trace_with_opname("sync.encode_archived")
    async def encode_archived(
        self,
        sync_config: SyncConfig,
        rooms: List[ArchivedSyncResult],
        time_now: int,
        serialize_options: SerializeEventConfig,

@@ -485,6 +503,7 @@ class SyncRestServlet(RestServlet):
        Encode the archived rooms in a sync result

        Args:
            sync_config
            rooms: list of sync results for rooms this user is joined to
            time_now: current time - used as a baseline for age calculations
            serialize_options: Event serializer options

@@ -494,13 +513,18 @@ class SyncRestServlet(RestServlet):
        joined = {}
        for room in rooms:
            joined[room.room_id] = await self.encode_room(
                room, time_now, joined=False, serialize_options=serialize_options
                sync_config,
                room,
                time_now,
                joined=False,
                serialize_options=serialize_options,
            )

        return joined

    async def encode_room(
        self,
        sync_config: SyncConfig,
        room: Union[JoinedSyncResult, ArchivedSyncResult],
        time_now: int,
        joined: bool,

@@ -508,6 +532,7 @@ class SyncRestServlet(RestServlet):
    ) -> JsonDict:
        """
        Args:
            sync_config
            room: sync result for a single room
            time_now: current time - used as a baseline for age calculations
            token_id: ID of the user's auth token - used for namespacing
@@ -548,13 +573,20 @@ class SyncRestServlet(RestServlet):

        account_data = room.account_data

        # We either include a `state` or `state_after` field depending on
        # whether the client has opted in to the newer `state_after` behavior.
        if sync_config.use_state_after:
            state_key_name = "org.matrix.msc4222.state_after"
        else:
            state_key_name = "state"

        result: JsonDict = {
            "timeline": {
                "events": serialized_timeline,
                "prev_batch": await room.timeline.prev_batch.to_string(self.store),
                "limited": room.timeline.limited,
            },
            "state": {"events": serialized_state},
            state_key_name: {"events": serialized_state},
            "account_data": {"events": account_data},
        }

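The sync.py hunks above thread `sync_config` through `encode_response`, `encode_joined`, `encode_archived` and `encode_room`, so the per-room encoder can choose between the legacy `state` key and MSC4222's unstable `org.matrix.msc4222.state_after` key. A minimal sketch of the resulting payload shape, assuming pre-serialized event lists and a plain boolean flag (the helper and variable names here are illustrative, not the servlet's actual locals):

```python
from typing import Any, Dict, List


def build_room_payload(
    use_state_after: bool,
    serialized_timeline: List[Dict[str, Any]],
    serialized_state: List[Dict[str, Any]],
    prev_batch: str,
    limited: bool,
) -> Dict[str, Any]:
    # Clients that opted in via MSC4222 receive the room state under the
    # unstable-prefixed key; everyone else keeps the long-standing "state" key.
    state_key_name = "org.matrix.msc4222.state_after" if use_state_after else "state"
    return {
        "timeline": {
            "events": serialized_timeline,
            "prev_batch": prev_batch,
            "limited": limited,
        },
        state_key_name: {"events": serialized_state},
        "account_data": {"events": []},
    }
```

Because the `else` branch keeps emitting `state`, clients that have not opted in see no change in the response shape.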
@@ -688,6 +720,7 @@ class SlidingSyncE2eeRestServlet(RestServlet):
            filter_collection=self.only_member_events_filter_collection,
            is_guest=requester.is_guest,
            device_id=device_id,
            use_state_after=False,  # We don't return any rooms so this flag is a no-op
        )

        since_token = None

@@ -149,9 +149,6 @@ class VersionsRestServlet(RestServlet):
                    "org.matrix.msc3881": msc3881_enabled,
                    # Adds support for filtering /messages by event relation.
                    "org.matrix.msc3874": self.config.experimental.msc3874_enabled,
                    # Adds support for simple HTTP rendezvous as per MSC3886
                    "org.matrix.msc3886": self.config.experimental.msc3886_endpoint
                    is not None,
                    # Adds support for relation-based redactions as per MSC3912.
                    "org.matrix.msc3912": self.config.experimental.msc3912_enabled,
                    # Whether recursively provide relations is supported.

@@ -94,7 +94,7 @@ class BaseUploadServlet(RestServlet):

        # if headers.hasHeader(b"Content-Disposition"):
        #     disposition = headers.getRawHeaders(b"Content-Disposition")[0]
        # TODO(markjh): parse content-dispostion
        # TODO(markjh): parse content-disposition

        return content_length, upload_name, media_type

@@ -254,6 +254,7 @@ class HomeServer(metaclass=abc.ABCMeta):
        "auth",
        "deactivate_account",
        "delayed_events",
        "e2e_keys",  # for the `delete_old_otks` scheduled-task handler
        "message",
        "pagination",
        "profile",

@@ -234,8 +234,11 @@ class StateStorageController:
            RuntimeError if we don't have a state group for one or more of the events
                (ie they are outliers or unknown)
        """
        if state_filter is None:
            state_filter = StateFilter.all()

        await_full_state = True
        if state_filter and not state_filter.must_await_full_state(self._is_mine_id):
        if not state_filter.must_await_full_state(self._is_mine_id):
            await_full_state = False

        event_to_groups = await self.get_state_group_for_events(

@@ -244,7 +247,7 @@ class StateStorageController:

        groups = set(event_to_groups.values())
        group_to_state = await self.stores.state._get_state_for_groups(
            groups, state_filter or StateFilter.all()
            groups, state_filter
        )

        state_event_map = await self.stores.main.get_events(

@@ -292,10 +295,11 @@ class StateStorageController:
            RuntimeError if we don't have a state group for one or more of the events
                (ie they are outliers or unknown)
        """
        if (
            await_full_state
            and state_filter
            and not state_filter.must_await_full_state(self._is_mine_id)
        if state_filter is None:
            state_filter = StateFilter.all()

        if await_full_state and not state_filter.must_await_full_state(
            self._is_mine_id
        ):
            # Full state is not required if the state filter is restrictive enough.
            await_full_state = False

@@ -306,7 +310,7 @@ class StateStorageController:

        groups = set(event_to_groups.values())
        group_to_state = await self.stores.state._get_state_for_groups(
            groups, state_filter or StateFilter.all()
            groups, state_filter
        )

        event_to_state = {

@@ -335,9 +339,10 @@ class StateStorageController:
            RuntimeError if we don't have a state group for the event (ie it is an
                outlier or is unknown)
        """
        state_map = await self.get_state_for_events(
            [event_id], state_filter or StateFilter.all()
        )
        if state_filter is None:
            state_filter = StateFilter.all()

        state_map = await self.get_state_for_events([event_id], state_filter)
        return state_map[event_id]

    @trace
@@ -365,9 +370,12 @@ class StateStorageController:
            RuntimeError if we don't have a state group for the event (ie it is an
                outlier or is unknown)
        """
        if state_filter is None:
            state_filter = StateFilter.all()

        state_map = await self.get_state_ids_for_events(
            [event_id],
            state_filter or StateFilter.all(),
            state_filter,
            await_full_state=await_full_state,
        )
        return state_map[event_id]

@@ -388,9 +396,12 @@ class StateStorageController:
                at the event and `state_filter` is not satisfied by partial state.
                Defaults to `True`.
        """
        if state_filter is None:
            state_filter = StateFilter.all()

        state_ids = await self.get_state_ids_for_event(
            event_id,
            state_filter=state_filter or StateFilter.all(),
            state_filter=state_filter,
            await_full_state=await_full_state,
        )

@@ -426,6 +437,9 @@ class StateStorageController:
                at the last event in the room before `stream_position` and
                `state_filter` is not satisfied by partial state. Defaults to `True`.
        """
        if state_filter is None:
            state_filter = StateFilter.all()

        # FIXME: This gets the state at the latest event before the stream ordering,
        # which might not be the same as the "current state" of the room at the time
        # of the stream token if there were multiple forward extremities at the time.

@@ -442,7 +456,7 @@ class StateStorageController:
        if last_event_id:
            state = await self.get_state_after_event(
                last_event_id,
                state_filter=state_filter or StateFilter.all(),
                state_filter=state_filter,
                await_full_state=await_full_state,
            )

@@ -500,9 +514,10 @@ class StateStorageController:
        Returns:
            Dict of state group to state map.
        """
        return await self.stores.state._get_state_for_groups(
            groups, state_filter or StateFilter.all()
        )
        if state_filter is None:
            state_filter = StateFilter.all()

        return await self.stores.state._get_state_for_groups(groups, state_filter)

    @trace
    @tag_args

@@ -583,12 +598,13 @@ class StateStorageController:
        Returns:
            The current state of the room.
        """
        if await_full_state and (
            not state_filter or state_filter.must_await_full_state(self._is_mine_id)
        ):
        if state_filter is None:
            state_filter = StateFilter.all()

        if await_full_state and state_filter.must_await_full_state(self._is_mine_id):
            await self._partial_state_room_tracker.await_full_state(room_id)

        if state_filter and not state_filter.is_full():
        if state_filter is not None and not state_filter.is_full():
            return await self.stores.main.get_partial_filtered_current_state_ids(
                room_id, state_filter
            )

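All of the `StateStorageController` hunks above replace scattered `state_filter or StateFilter.all()` fallbacks with one explicit `None` check at the top of each method. A minimal sketch of the pattern, using a stand-in for `StateFilter` so the snippet is self-contained (the real class lives in `synapse.types.state`):

```python
from typing import Dict, Optional


class StateFilter:
    """Stand-in for synapse.types.state.StateFilter, for illustration only."""

    @staticmethod
    def all() -> "StateFilter":
        return StateFilter()

    def is_full(self) -> bool:
        return True


async def get_current_state_ids(
    room_id: str, state_filter: Optional[StateFilter] = None
) -> Dict[str, str]:
    # Normalise the optional argument once, up front.  Relying on
    # `state_filter or StateFilter.all()` at each use site only works as long
    # as no real filter object is ever falsy; an explicit `None` check states
    # the intent directly and lets the rest of the body assume a filter exists.
    if state_filter is None:
        state_filter = StateFilter.all()

    # ... from here on, `state_filter` is always a StateFilter instance.
    return {}
```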
@@ -322,6 +322,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
        self._attempt_to_invalidate_cache(
            "get_unread_event_push_actions_by_room_for_user", (room_id,)
        )
        self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id, event_id))

        self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,))

@@ -446,6 +447,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
        self._attempt_to_invalidate_cache("_get_state_group_for_event", None)

        self._attempt_to_invalidate_cache("get_event_ordering", None)
        self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id,))
        self._attempt_to_invalidate_cache("is_partial_state_event", None)
        self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None)

@@ -99,6 +99,13 @@ class EndToEndKeyBackgroundStore(SQLBaseStore):
            unique=True,
        )

        self.db_pool.updates.register_background_index_update(
            update_name="add_otk_ts_added_index",
            index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx",
            table="e2e_one_time_keys_json",
            columns=("user_id", "device_id", "algorithm", "ts_added_ms"),
        )


class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore):
    def __init__(
@@ -1122,7 +1129,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
        """Take a list of one time keys out of the database.

        Args:
            query_list: An iterable of tuples of (user ID, device ID, algorithm).
            query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys).

        Returns:
            A tuple (results, missing) of:
@@ -1310,9 +1317,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
            OTK was found.
        """

        # Return the oldest keys from this device (based on `ts_added_ms`).
        # Doing so means that keys are issued in the same order they were uploaded,
        # which reduces the chances of a client expiring its copy of a (private)
        # key while the public key is still on the server, waiting to be issued.
        sql = """
            SELECT key_id, key_json FROM e2e_one_time_keys_json
            WHERE user_id = ? AND device_id = ? AND algorithm = ?
            ORDER BY ts_added_ms
            LIMIT ?
        """

@@ -1354,13 +1366,22 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
            A list of tuples (user_id, device_id, algorithm, key_id, key_json)
            for each OTK claimed.
        """
        # Find, delete, and return the oldest keys from each device (based on
        # `ts_added_ms`).
        #
        # Doing so means that keys are issued in the same order they were uploaded,
        # which reduces the chances of a client expiring its copy of a (private)
        # key while the public key is still on the server, waiting to be issued.
        sql = """
            WITH claims(user_id, device_id, algorithm, claim_count) AS (
                VALUES ?
            ), ranked_keys AS (
                SELECT
                    user_id, device_id, algorithm, key_id, claim_count,
                    ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r
                    ROW_NUMBER() OVER (
                        PARTITION BY (user_id, device_id, algorithm)
                        ORDER BY ts_added_ms
                    ) AS r
                FROM e2e_one_time_keys_json
                JOIN claims USING (user_id, device_id, algorithm)
            )
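Both claim paths above now order candidate keys by `ts_added_ms`, so the oldest uploaded OTK is handed out first. A rough illustration of what the `ROW_NUMBER()` ranking produces, run against an in-memory SQLite table with the same column names (the real bulk query additionally feeds per-device claim counts in through the `VALUES ?` CTE):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE e2e_one_time_keys_json (
        user_id TEXT, device_id TEXT, algorithm TEXT,
        key_id TEXT, ts_added_ms INTEGER, key_json TEXT
    );
    INSERT INTO e2e_one_time_keys_json VALUES
        ('@u:hs', 'DEV', 'signed_curve25519', 'AAAAAB', 2000, '{}'),
        ('@u:hs', 'DEV', 'signed_curve25519', 'AAAAAA', 1000, '{}');
    """
)

# Rank each device's keys by upload time; r = 1 is the oldest key and is the
# one a claim should hand out first.
rows = conn.execute(
    """
    SELECT key_id, ROW_NUMBER() OVER (
        PARTITION BY user_id, device_id, algorithm
        ORDER BY ts_added_ms
    ) AS r
    FROM e2e_one_time_keys_json
    ORDER BY r
    """
).fetchall()
print(rows)  # [('AAAAAA', 1), ('AAAAAB', 2)] - the oldest key is ranked first
```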
@@ -1432,6 +1453,54 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
            impl,
        )

    async def delete_old_otks_for_next_user_batch(
        self, after_user_id: str, number_of_users: int
    ) -> Tuple[List[str], int]:
        """Deletes old OTKs belonging to the next batch of users

        Returns:
            `(users, rows)`, where:
             * `users` is the user IDs of the updated users. An empty list if we are done.
             * `rows` is the number of deleted rows
        """

        def impl(txn: LoggingTransaction) -> Tuple[List[str], int]:
            # Find a batch of users
            txn.execute(
                """
                SELECT DISTINCT(user_id) FROM e2e_one_time_keys_json
                WHERE user_id > ?
                ORDER BY user_id
                LIMIT ?
                """,
                (after_user_id, number_of_users),
            )
            users = [row[0] for row in txn.fetchall()]
            if len(users) == 0:
                return users, 0

            # Delete any old OTKs belonging to those users.
            #
            # We only actually consider OTKs whose key ID is 6 characters long. These
            # keys were likely made by libolm rather than Vodozemac; libolm only kept
            # 100 private OTKs, so was far more vulnerable than Vodozemac to throwing
            # away keys prematurely.
            clause, args = make_in_list_sql_clause(
                txn.database_engine, "user_id", users
            )
            sql = f"""
                DELETE FROM e2e_one_time_keys_json
                WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6
            """
            args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000))
            txn.execute(sql, args)

            return users, txn.rowcount

        return await self.db_pool.runInteraction(
            "delete_old_otks_for_next_user_batch", impl
        )


class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
    def __init__(

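`delete_old_otks_for_next_user_batch` returns the batch of user IDs it processed, so a caller can resume from the last one. A hedged sketch of such a driver loop (the actual scheduled-task wiring lives in the `e2e_keys` handler referenced in the `HomeServer` hunk above and may differ; `store` and the batch size here are assumptions):

```python
async def prune_old_otks(store) -> int:
    """Walk every user with OTKs in batches, deleting week-old libolm-style keys."""
    deleted = 0
    last_user_id = ""  # all user IDs sort after the empty string, so start at the top
    while True:
        users, rows = await store.delete_old_otks_for_next_user_batch(
            after_user_id=last_user_id, number_of_users=100
        )
        if not users:
            break  # an empty batch means the whole table has been walked
        deleted += rows
        last_user_id = users[-1]
    return deleted
```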
@@ -193,6 +193,14 @@ class _EventRow:
    outlier: bool


@attr.s(slots=True, frozen=True, auto_attribs=True)
class EventMetadata:
    """Event metadata returned by `get_metadata_for_event(..)`"""

    sender: str
    received_ts: int


class EventRedactBehaviour(Enum):
    """
    What to do when retrieving a redacted event from the database.
@@ -2580,3 +2588,22 @@ class EventsWorkerStore(SQLBaseStore):
                _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
            )
        )

    @cached(tree=True)
    async def get_metadata_for_event(
        self, room_id: str, event_id: str
    ) -> Optional[EventMetadata]:
        row = await self.db_pool.simple_select_one(
            table="events",
            keyvalues={"room_id": room_id, "event_id": event_id},
            retcols=("sender", "received_ts"),
            allow_none=True,
            desc="get_metadata_for_event",
        )
        if row is None:
            return None

        return EventMetadata(
            sender=row[0],
            received_ts=row[1],
        )

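`get_metadata_for_event` is cached with `tree=True`, which lets the entry be dropped either for a single `(room_id, event_id)` pair or for a whole `(room_id,)` prefix, matching the two `_attempt_to_invalidate_cache` calls added in the `CacheInvalidationWorkerStore` hunks earlier. A toy illustration of prefix invalidation over tuple keys (not Synapse's actual cache implementation):

```python
from typing import Dict, Tuple

cache: Dict[Tuple[str, str], dict] = {
    ("!room:hs", "$event1"): {"sender": "@a:hs", "received_ts": 1},
    ("!room:hs", "$event2"): {"sender": "@b:hs", "received_ts": 2},
    ("!other:hs", "$event3"): {"sender": "@c:hs", "received_ts": 3},
}


def invalidate(prefix: Tuple[str, ...]) -> None:
    # A tree cache can drop every entry whose key starts with `prefix`,
    # e.g. all cached events of a room when that room's data is purged.
    for key in [k for k in cache if k[: len(prefix)] == prefix]:
        del cache[key]


invalidate(("!room:hs",))              # room-level: removes $event1 and $event2
invalidate(("!other:hs", "$event3"))   # event-level: removes just $event3
```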
@@ -2550,7 +2550,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
            still contains events with partial state.
        """
        try:
            async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
            async with (
                self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id
            ):
                await self.db_pool.runInteraction(
                    "clear_partial_state_room",
                    self._clear_partial_state_room_txn,

@@ -21,7 +21,11 @@ import attr
from synapse.api.errors import SlidingSyncUnknownPosition
from synapse.logging.opentracing import log_kv
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import LoggingTransaction
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.types import MultiWriterStreamToken, RoomStreamToken
from synapse.types.handlers.sliding_sync import (
    HaveSentRoom,

@@ -35,12 +39,28 @@ from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.databases.main import DataStore

logger = logging.getLogger(__name__)


class SlidingSyncStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_index_update(
            update_name="sliding_sync_connection_room_configs_required_state_id_idx",
            index_name="sliding_sync_connection_room_configs_required_state_id_idx",
            table="sliding_sync_connection_room_configs",
            columns=("required_state_id",),
        )

    async def get_latest_bump_stamp_for_room(
        self,
        room_id: str,

@@ -572,10 +572,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        Returns:
            Map from type/state_key to event ID.
        """
        if state_filter is None:
            state_filter = StateFilter.all()

        where_clause, where_args = (
            state_filter or StateFilter.all()
        ).make_sql_filter_clause()
        where_clause, where_args = (state_filter).make_sql_filter_clause()

        if not where_clause:
            # We delegate to the cached version

@@ -584,7 +584,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        def _get_filtered_current_state_ids_txn(
            txn: LoggingTransaction,
        ) -> StateMap[str]:
            results = StateMapWrapper(state_filter=state_filter or StateFilter.all())
            results = StateMapWrapper(state_filter=state_filter)

            sql = """
                SELECT type, state_key, event_id FROM current_state_events
@@ -681,7 +681,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
        context: EventContext,
    ) -> None:
        """Update the state group for a partial state event"""
        async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id:
        async with (
            self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id
        ):
            await self.db_pool.runInteraction(
                "update_state_for_partial_state_event",
                self._update_state_for_partial_state_event_txn,

@@ -20,18 +20,26 @@
#

import logging
from typing import List, Optional, Tuple
from typing import TYPE_CHECKING, List, Optional, Tuple

import attr

from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_in_list_sql_clause,
)
from synapse.storage.databases.main.stream import _filter_results_by_stream
from synapse.types import RoomStreamToken, StrCollection
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@@ -54,6 +62,21 @@ class StateDeltasStore(SQLBaseStore):
    # attribute. TODO: can we get static analysis to enforce this?
    _curr_state_delta_stream_cache: StreamChangeCache

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_index_update(
            update_name="current_state_delta_stream_room_index",
            index_name="current_state_delta_stream_room_idx",
            table="current_state_delta_stream",
            columns=("room_id", "stream_id"),
        )

    async def get_partial_current_state_deltas(
        self, prev_stream_id: int, max_stream_id: int
    ) -> Tuple[int, List[StateDelta]]:

@@ -112,8 +112,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
        Returns:
            Map from state_group to a StateMap at that point.
        """

        state_filter = state_filter or StateFilter.all()
        if state_filter is None:
            state_filter = StateFilter.all()

        results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}


@@ -284,7 +284,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
        Returns:
            Dict of state group to state map.
        """
        state_filter = state_filter or StateFilter.all()
        if state_filter is None:
            state_filter = StateFilter.all()

        member_filter, non_member_filter = state_filter.get_member_split()

@@ -0,0 +1,18 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.


-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can
-- efficiently be issued in the same order they were uploaded.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
  (8803, 'add_otk_ts_added_index', '{}');
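This delta schedules the `add_otk_ts_added_index` background update registered in `EndToEndKeyBackgroundStore` above; once it has run, the claim queries' `ORDER BY ts_added_ms` can be satisfied by walking the index instead of sorting. A small SQLite-based sanity check of that idea (illustrative only; Synapse builds the index through its background-update machinery and also targets PostgreSQL):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE e2e_one_time_keys_json (
        user_id TEXT, device_id TEXT, algorithm TEXT,
        key_id TEXT, ts_added_ms INTEGER, key_json TEXT
    );
    CREATE INDEX e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx
        ON e2e_one_time_keys_json (user_id, device_id, algorithm, ts_added_ms);
    """
)

# With the four-column index in place, the oldest-first claim query can read
# matching rows in ts_added_ms order directly from the index.
plan = conn.execute(
    """
    EXPLAIN QUERY PLAN
    SELECT key_id, key_json FROM e2e_one_time_keys_json
    WHERE user_id = ? AND device_id = ? AND algorithm = ?
    ORDER BY ts_added_ms LIMIT ?
    """,
    ("@u:hs", "DEV", "signed_curve25519", 1),
).fetchall()
print(plan)  # expect a "USING INDEX ...ts_added_idx" step and no temp B-tree sort
```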
Some files were not shown because too many files have changed in this diff.