Compare commits


4 Commits

Author          SHA1         Message                               Date
Eric Eastwood   c75fb2eeb6   Add changelog                         2025-11-20 17:36:21 -06:00
Eric Eastwood   ad94b9103e   Use consistent indentation            2025-11-20 17:31:42 -06:00
Eric Eastwood   588c2b4db9   Use return instead of exit            2025-11-20 17:29:07 -06:00
                             This is useful so that the script can be sourced without exiting the calling subshell.
Eric Eastwood   573accd0df   Run complement.sh logic as function   2025-11-20 17:25:50 -06:00
                             This is useful as later we can refactor the early `exit` calls to `return` calls so that the script can be sourced without exiting the calling subshell.
237 changed files with 2384 additions and 6914 deletions
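The last two commits above describe a refactor of `scripts-dev/complement.sh`: wrap the script body in a function and replace early `exit` calls with `return`, so the script can be sourced without killing the calling shell. A minimal sketch of that pattern follows; the function body and checks are hypothetical illustrations, not the actual complement.sh contents.

#!/usr/bin/env bash
# Sketch of the pattern described in the commit messages above (hypothetical names).
# Keeping the logic in a function lets early failures use `return`, which only
# leaves the function; an `exit` would also terminate a shell that sourced this file.

run_complement() {
    if ! command -v docker &> /dev/null; then
        echo "docker is required" >&2
        return 1   # was `exit 1` before the refactor
    fi
    echo "running Complement tests..."
}

# Execute only when run directly; when sourced, just define the function and
# let the caller decide when to run it.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    run_complement "$@"
    exit $?
fi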

View File

@@ -7,4 +7,4 @@ if command -v yum &> /dev/null; then
fi
# Install a Rust toolchain
-curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal
+curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal

146
.ci/scripts/auditwheel_wrapper.py Executable file
View File

@@ -0,0 +1,146 @@
#!/usr/bin/env python
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
# Wraps `auditwheel repair` to first check whether we're repairing a potentially
# abi3-compatible wheel; if so, rename the wheel before repairing it.
import argparse
import os
import subprocess
from zipfile import ZipFile
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version
def check_is_abi3_compatible(wheel_file: str) -> None:
"""Check the contents of the built wheel for any `.so` files that are *not*
abi3 compatible.
"""
with ZipFile(wheel_file, "r") as wheel:
for file in wheel.namelist():
if not file.endswith(".so"):
continue
if not file.endswith(".abi3.so"):
raise Exception(f"Found non-abi3 lib: {file}")
def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
"""Replaces the cpython wheel file with a ABI3 compatible wheel"""
if tag.abi == "abi3":
# Nothing to do.
return wheel_file
check_is_abi3_compatible(wheel_file)
# HACK: it seems that some older versions of pip will consider a wheel marked
# as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
# here; there are some clues in
# https://github.com/pantsbuild/pants/pull/12857
# https://github.com/pypa/pip/issues/9138
# https://github.com/pypa/packaging/pull/319
# Empirically this seems to work, note that macOS 11 and 10.16 are the same,
# both versions are valid for backwards compatibility.
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
dirname,
f"{name}-{version}-{abi3_tag}.whl",
)
os.rename(wheel_file, new_wheel_file)
print("Renamed wheel to", new_wheel_file)
return new_wheel_file
def main(wheel_file: str, dest_dir: str, archs: str | None) -> None:
"""Entry point"""
# Parse the wheel file name into its parts. Note that `parse_wheel_filename`
# normalizes the package name (i.e. it converts matrix_synapse ->
# matrix-synapse), which is not what we want.
_, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
name = os.path.basename(wheel_file).split("-")[0]
if len(tags) != 1:
# We expect the wheel file to have only a single tag
raise Exception(f"Unexpectedly found multiple tags: {tags}")
tag = next(iter(tags))
if build:
# We don't use build tags in Synapse
raise Exception(f"Unexpected build tag: {build}")
# If the wheel is for cpython then convert it into an abi3 wheel.
if tag.interpreter.startswith("cp"):
wheel_file = cpython(wheel_file, name, version, tag)
# Finally, repair the wheel.
if archs is not None:
# If we are given archs then we are on macos and need to use
# `delocate-listdeps`.
subprocess.run(["delocate-listdeps", wheel_file], check=True)
subprocess.run(
["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
check=True,
)
else:
subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
parser.add_argument(
"--wheel-dir",
"-w",
metavar="WHEEL_DIR",
help="Directory to store delocated wheels",
required=True,
)
parser.add_argument(
"--require-archs",
metavar="archs",
default=None,
)
parser.add_argument(
"wheel_file",
metavar="WHEEL_FILE",
)
args = parser.parse_args()
wheel_file = args.wheel_file
wheel_dir = args.wheel_dir
archs = args.require_archs
main(wheel_file, wheel_dir, archs)
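The CI wiring that calls this wrapper is not part of this changeset, so the following invocation is only an assumed illustration (the wheel filenames and output directory are placeholders): the wheel is the positional argument, `-w` names the output directory, and `--require-archs` switches the repair step from auditwheel to delocate.

# Hypothetical Linux invocation: rename an abi3-compatible cp3x wheel, then
# run `auditwheel repair` into wheelhouse/.
.ci/scripts/auditwheel_wrapper.py -w wheelhouse/ \
    dist/matrix_synapse-1.2.3-cp311-cp311-manylinux_2_17_x86_64.whl

# Hypothetical macOS invocation: --require-archs makes the script use
# delocate-listdeps/delocate-wheel instead of auditwheel.
.ci/scripts/auditwheel_wrapper.py --require-archs arm64 -w wheelhouse/ \
    dist/matrix_synapse-1.2.3-cp311-cp311-macosx_11_0_arm64.whl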

39
.ci/scripts/prepare_old_deps.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `jammy` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
set -ex
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - `-E` use extended regex syntax.
# - Don't modify the line that defines required Python versions.
# - Replace all lower and tilde bounds with exact bounds.
# - Replace all caret bounds with exact bounds.
# - Delete all lines referring to psycopg2 - so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
sed -i -E '
/^\s*requires-python\s*=/b
s/[~>]=/==/g
s/\^/==/g
/psycopg2/d
s/pyOpenSSL\s*==\s*16\.0\.0"/pyOpenSSL==17.0.0"/
/systemd/d
' pyproject.toml
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"

View File

@@ -1,92 +1,23 @@
version: 2
# As dependabot is currently only run on a weekly basis, we raise the
# open-pull-requests-limit to 10 (from the default of 5) to better ensure we
# don't continuously grow a backlog of updates.
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
open-pull-requests-limit: 10
schedule:
interval: "weekly"
# Group patch updates to packages together into a single PR, as they rarely
# if ever contain breaking changes that need to be reviewed separately.
#
# Less PRs means a streamlined review process.
#
# Python packages follow semantic versioning, and tend to only introduce
# breaking changes in major version bumps. Thus, we'll group minor and patch
# versions together.
groups:
minor-and-patches:
applies-to: version-updates
patterns:
- "*"
update-types:
- "minor"
- "patch"
# Prevent pulling packages that were recently updated to help mitigate
# supply chain attacks. 14 days was taken from the recommendation at
# https://blog.yossarian.net/2025/11/21/We-should-all-be-using-dependency-cooldowns
# where the author noted that 9/10 attacks would have been mitigated by a
# two week cooldown.
#
# The cooldown only applies to general updates; security updates will still
# be pulled in as soon as possible.
cooldown:
default-days: 14
- package-ecosystem: "docker" - package-ecosystem: "docker"
directory: "/docker" directory: "/docker"
open-pull-requests-limit: 10
schedule:
interval: "weekly"
# For container versions, breaking changes are also typically only introduced in major
# package bumps.
groups:
minor-and-patches:
applies-to: version-updates
patterns:
- "*"
update-types:
- "minor"
- "patch"
cooldown:
default-days: 14
- package-ecosystem: "github-actions" - package-ecosystem: "github-actions"
directory: "/" directory: "/"
open-pull-requests-limit: 10
schedule:
interval: "weekly"
# Similarly for GitHub Actions, breaking changes are typically only introduced in major
# package bumps.
groups:
minor-and-patches:
applies-to: version-updates
patterns:
- "*"
update-types:
- "minor"
- "patch"
cooldown:
default-days: 14
- package-ecosystem: "cargo" - package-ecosystem: "cargo"
directory: "/" directory: "/"
open-pull-requests-limit: 10
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"
# The Rust ecosystem is special in that breaking changes are often introduced
# in minor version bumps, as packages typically stay pre-1.0 for a long time.
# Thus we specifically keep minor version bumps separate in their own PRs.
groups:
patches:
applies-to: version-updates
patterns:
- "*"
update-types:
- "patch"
cooldown:
default-days: 14

View File

@@ -31,7 +31,7 @@ jobs:
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -123,7 +123,7 @@ jobs:
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Calculate docker image tag
-uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
+uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
with:
images: ${{ matrix.repository }}
flavor: |

34
.github/workflows/docs-pr-netlify.yaml vendored Normal file
View File

@@ -0,0 +1,34 @@
name: Deploy documentation PR preview
on:
workflow_run:
workflows: [ "Prepare documentation PR preview" ]
types:
- completed
jobs:
netlify:
if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
runs-on: ubuntu-latest
steps:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@ac66b43f0e6a346234dd65d4d0c8fbb31cb316e5 # v11
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
name: book
path: book
- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@9805cd123fc9a7e421e35340a05e1ebc5dee46b5 # v3
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}
branch: ${{ github.event.workflow_run.head_branch }}
revision: ${{ github.event.workflow_run.head_sha }}
token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
site_id: ${{ secrets.NETLIFY_SITE_ID }}
desc: Documentation preview
deployment_env: PR Documentation Preview

View File

@@ -13,7 +13,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -24,7 +24,7 @@ jobs:
mdbook-version: '0.4.17'
- name: Setup python
-uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
@@ -50,7 +50,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup mdbook
uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0

View File

@@ -50,7 +50,7 @@ jobs:
needs:
- pre
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -64,7 +64,7 @@ jobs:
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
- name: Setup python
-uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"

View File

@@ -18,14 +18,14 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy, rustfmt
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0

View File

@@ -42,12 +42,12 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
@@ -77,13 +77,13 @@ jobs:
postgres-version: "14"
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
@@ -93,7 +93,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -152,13 +152,13 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
@@ -202,14 +202,14 @@ jobs:
steps:
- name: Check out synapse codebase
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
-- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -234,7 +234,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -16,8 +16,8 @@ jobs:
name: "Check locked dependencies have sdists" name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with: with:
python-version: '3.x' python-version: '3.x'
- run: pip install tomli - run: pip install tomli

View File

@@ -33,17 +33,17 @@ jobs:
packages: write
steps:
- name: Checkout specific branch (debug build)
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
if: github.event_name == 'push'
with:
ref: master
@@ -55,7 +55,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
-uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
+uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |

View File

@@ -5,7 +5,7 @@ name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs). PRs skip
-# building wheels on ARM.
+# building wheels on macOS & ARM.
pull_request:
push:
branches: ["develop", "release-*"]
@@ -27,8 +27,8 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- id: set-distros
@@ -55,7 +55,7 @@ jobs:
steps:
- name: Checkout
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
path: src
@@ -74,7 +74,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
-uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
@@ -114,20 +114,27 @@ jobs:
os:
- ubuntu-24.04
- ubuntu-24.04-arm
+- macos-14 # This uses arm64
+- macos-15-intel # This uses x86-64
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
+# Don't build macos wheels on PR CI.
+- is_pr: true
+os: "macos-15-intel"
+- is_pr: true
+os: "macos-14"
# Don't build aarch64 wheels on PR CI.
- is_pr: true
os: "ubuntu-24.04-arm"
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
@@ -163,8 +170,8 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.10"

View File

@@ -14,8 +14,8 @@ jobs:
name: Ensure Synapse config schema is valid
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- name: Install check-jsonschema
@@ -40,8 +40,8 @@ jobs:
name: Ensure generated documentation is up-to-date
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- name: Install PyYAML

View File

@@ -86,12 +86,12 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
@@ -106,18 +106,18 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
-- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'"
+- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
check-lockfile:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
@@ -129,7 +129,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,13 +151,13 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -187,20 +187,19 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
-# Only run on pull_request events, targeting develop/release branches, and skip when the PR author is dependabot[bot].
-if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }}
+if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
@@ -214,14 +213,14 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
components: clippy
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: cargo clippy -- -D warnings
@@ -233,14 +232,14 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: nightly-2025-04-23
components: clippy
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: cargo clippy --all-features -- -D warnings
@@ -251,13 +250,13 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -287,7 +286,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -296,7 +295,7 @@ jobs:
# `.rustfmt.toml`.
toolchain: nightly-2025-04-23
components: rustfmt
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: cargo fmt --check
@@ -307,8 +306,8 @@ jobs:
needs: changes
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- run: "pip install rstcheck"
@@ -355,8 +354,8 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.x"
- id: get-matrix
@@ -376,7 +375,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -394,7 +393,7 @@ jobs:
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -432,13 +431,13 @@ jobs:
- changes
runs-on: ubuntu-22.04
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
@@ -447,17 +446,19 @@ jobs:
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
-- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: '3.10'
- name: Prepare old deps
-# Note: we install using `uv` here, not poetry or pip to allow us to test with the
-# minimum version of all dependencies, both those explicitly specified and those
-# implicitly brought in by the explicit dependencies.
-run: |
-pip install uv
-uv pip install --system --resolution=lowest .[all,test]
+if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
+run: .ci/scripts/prepare_old_deps.sh
+# Note: we install using `pip` here, not poetry. `poetry install` ignores the
+# build-system section (https://github.com/python-poetry/poetry/issues/6154), but
+# we explicitly want to test that you can `pip install` using the oldest version
+# of poetry-core and setuptools-rust.
+- run: pip install .[all,test]
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
@@ -495,7 +496,7 @@ jobs:
extras: ["all"]
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -545,7 +546,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
@@ -553,7 +554,7 @@ jobs:
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Run SyTest
run: /bootstrap.sh synapse
@@ -592,7 +593,7 @@ jobs:
--health-retries 5
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -636,7 +637,7 @@ jobs:
--health-retries 5
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
@@ -691,7 +692,7 @@ jobs:
steps:
- name: Checkout synapse codebase
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
path: synapse
@@ -699,12 +700,12 @@ jobs:
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
-- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -727,13 +728,13 @@ jobs:
- changes
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: cargo test
@@ -747,13 +748,13 @@ jobs:
- changes
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: nightly-2022-12-01
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- run: cargo bench --no-run

View File

@@ -22,7 +22,7 @@ jobs:
# This field is case-sensitive.
TARGET_STATUS: Needs info
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
# Only clone the script file we care about, instead of the whole repo.
sparse-checkout: .ci/scripts/triage_labelled_issue.sh

View File

@@ -43,13 +43,13 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -70,14 +70,14 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -117,13 +117,13 @@ jobs:
- ${{ github.workspace }}:/src
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
toolchain: ${{ env.RUST_VERSION }}
-- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
@@ -175,14 +175,14 @@ jobs:
steps:
- name: Run actions/checkout@v4 for synapse
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
-- uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
@@ -217,7 +217,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,107 +1,5 @@
# Synapse 1.144.0 (2025-12-09)
## Deprecation of MacOS Python wheels
The team has decided to deprecate and stop publishing python wheels for MacOS.
Synapse docker images will continue to work on MacOS, as will building Synapse
from source (though note this requires a Rust compiler).
## Unstable mutual rooms endpoint is now behind an experimental feature flag
Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`),
please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes
that disable that endpoint by default.
No significant changes since 1.144.0rc1.
# Synapse 1.144.0rc1 (2025-12-02)
Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes that disable that endpoint by default.
## Features
- Add experimental implementation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking). ([\#19203](https://github.com/element-hq/synapse/issues/19203))
- Allow restarting delayed event timeouts on workers. ([\#19207](https://github.com/element-hq/synapse/issues/19207))
## Bugfixes
- Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. ([\#18960](https://github.com/element-hq/synapse/issues/18960))
- Fix v12 rooms when running with `use_frozen_dicts: True`. ([\#19235](https://github.com/element-hq/synapse/issues/19235))
- Fix bug where invalid `canonical_alias` content would return 500 instead of 400. ([\#19240](https://github.com/element-hq/synapse/issues/19240))
- Fix bug where `Duration` was logged incorrectly. ([\#19267](https://github.com/element-hq/synapse/issues/19267))
## Improved Documentation
- Document in the `--config-path` help how multiple files are merged - by merging them shallowly. ([\#19243](https://github.com/element-hq/synapse/issues/19243))
## Deprecations and Removals
- Stop building release wheels for MacOS. ([\#19225](https://github.com/element-hq/synapse/issues/19225))
## Internal Changes
- Improve event filtering for Simplified Sliding Sync. ([\#17782](https://github.com/element-hq/synapse/issues/17782))
- Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`. ([\#19208](https://github.com/element-hq/synapse/issues/19208))
- Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable). ([\#19209](https://github.com/element-hq/synapse/issues/19209))
- Expire sliding sync connections that are too old or have too much pending data. ([\#19211](https://github.com/element-hq/synapse/issues/19211))
- Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available. ([\#19219](https://github.com/element-hq/synapse/issues/19219))
- Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch. ([\#19220](https://github.com/element-hq/synapse/issues/19220))
- Auto-fix trailing spaces in multi-line strings and comments when running the lint script. ([\#19221](https://github.com/element-hq/synapse/issues/19221))
- Move towards using a dedicated `Duration` type. ([\#19223](https://github.com/element-hq/synapse/issues/19223), [\#19229](https://github.com/element-hq/synapse/issues/19229))
- Improve robustness of the SQL schema linting in CI. ([\#19224](https://github.com/element-hq/synapse/issues/19224))
- Add log to determine whether clients are using `/messages` as expected. ([\#19226](https://github.com/element-hq/synapse/issues/19226))
- Simplify README and add ESS Getting started section. ([\#19228](https://github.com/element-hq/synapse/issues/19228), [\#19259](https://github.com/element-hq/synapse/issues/19259))
- Add a unit test for ensuring associated refresh tokens are erased when a device is deleted. ([\#19230](https://github.com/element-hq/synapse/issues/19230))
- Prompt user to consider adding future deprecations to the changelog in release script. ([\#19239](https://github.com/element-hq/synapse/issues/19239))
- Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`. ([\#19251](https://github.com/element-hq/synapse/issues/19251))
- Stop building macos wheels in CI pipeline. ([\#19263](https://github.com/element-hq/synapse/issues/19263))
### Updates to locked dependencies
* Bump Swatinem/rust-cache from 2.8.1 to 2.8.2. ([\#19244](https://github.com/element-hq/synapse/issues/19244))
* Bump actions/checkout from 5.0.0 to 6.0.0. ([\#19213](https://github.com/element-hq/synapse/issues/19213))
* Bump actions/setup-go from 6.0.0 to 6.1.0. ([\#19214](https://github.com/element-hq/synapse/issues/19214))
* Bump actions/setup-python from 6.0.0 to 6.1.0. ([\#19245](https://github.com/element-hq/synapse/issues/19245))
* Bump attrs from 25.3.0 to 25.4.0. ([\#19215](https://github.com/element-hq/synapse/issues/19215))
* Bump docker/metadata-action from 5.9.0 to 5.10.0. ([\#19246](https://github.com/element-hq/synapse/issues/19246))
* Bump http from 1.3.1 to 1.4.0. ([\#19249](https://github.com/element-hq/synapse/issues/19249))
* Bump pydantic from 2.12.4 to 2.12.5. ([\#19250](https://github.com/element-hq/synapse/issues/19250))
* Bump pyopenssl from 25.1.0 to 25.3.0. ([\#19248](https://github.com/element-hq/synapse/issues/19248))
* Bump rpds-py from 0.28.0 to 0.29.0. ([\#19216](https://github.com/element-hq/synapse/issues/19216))
* Bump rpds-py from 0.29.0 to 0.30.0. ([\#19247](https://github.com/element-hq/synapse/issues/19247))
* Bump sentry-sdk from 2.44.0 to 2.46.0. ([\#19218](https://github.com/element-hq/synapse/issues/19218))
* Bump types-bleach from 6.2.0.20250809 to 6.3.0.20251115. ([\#19217](https://github.com/element-hq/synapse/issues/19217))
* Bump types-jsonschema from 4.25.1.20250822 to 4.25.1.20251009. ([\#19252](https://github.com/element-hq/synapse/issues/19252))
# Synapse 1.143.0 (2025-11-25)
## Dropping support for PostgreSQL 13
In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.
No significant changes since 1.143.0rc2.
# Synapse 1.143.0rc2 (2025-11-18) # Synapse 1.143.0rc2 (2025-11-18)
## Dropping support for PostgreSQL 13
In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.
## Internal Changes ## Internal Changes
- Fixes docker image creation in the release workflow. - Fixes docker image creation in the release workflow.
@@ -110,6 +8,12 @@ This release of Synapse requires PostgreSQL 14+.
# Synapse 1.143.0rc1 (2025-11-18) # Synapse 1.143.0rc1 (2025-11-18)
## Dropping support for PostgreSQL 13
In line with our [deprecation policy](https://github.com/element-hq/synapse/blob/develop/docs/deprecation_policy.md), we've dropped
support for PostgreSQL 13, as it is no longer supported upstream.
This release of Synapse requires PostgreSQL 14+.
## Features ## Features
- Support multiple config files in `register_new_matrix_user`. ([\#18784](https://github.com/element-hq/synapse/issues/18784)) - Support multiple config files in `register_new_matrix_user`. ([\#18784](https://github.com/element-hq/synapse/issues/18784))

5
Cargo.lock generated
View File

@@ -374,11 +374,12 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]] [[package]]
name = "http" name = "http"
version = "1.4.0" version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
dependencies = [ dependencies = [
"bytes", "bytes",
"fnv",
"itoa", "itoa",
] ]

View File

@@ -7,48 +7,170 @@
Synapse is an open source `Matrix <https://matrix.org>`__ homeserver Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
implementation, written and maintained by `Element <https://element.io>`_. implementation, written and maintained by `Element <https://element.io>`_.
`Matrix <https://github.com/matrix-org>`__ is the open standard for secure and `Matrix <https://github.com/matrix-org>`__ is the open standard for
interoperable real-time communications. You can directly run and manage the secure and interoperable real-time communications. You can directly run
source code in this repository, available under an AGPL license (or and manage the source code in this repository, available under an AGPL
alternatively under a commercial license from Element). license (or alternatively under a commercial license from Element).
There is no support provided by Element unless you have a
subscription from Element.
There is no support provided by Element unless you have a subscription from Subscription
Element. ============
🚀 Getting started For those that need an enterprise-ready solution, Element
================== Server Suite (ESS) is `available via subscription <https://element.io/pricing>`_.
ESS builds on Synapse to offer a complete Matrix-based backend including the full
`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
giving admins the power to easily manage an organization-wide
deployment. It includes advanced identity management, auditing,
moderation and data retention options as well as Long-Term Support and
SLAs. ESS supports any Matrix-compatible client.
This component is developed and maintained by `Element <https://element.io>`_. .. contents::
It gets shipped as part of the **Element Server Suite (ESS)** which provides the
official means of deployment.
ESS is a Matrix distribution from Element with focus on quality and ease of use. 🛠️ Installation and configuration
It ships a full Matrix stack tailored to the respective use case. ==================================
There are three editions of ESS: The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
<https://element-hq.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.
- `ESS Community <https://github.com/element-hq/ess-helm>`_ - the free Matrix .. _federation:
distribution from Element tailored to small-/mid-scale, non-commercial
community use cases Synapse has a variety of `config options
- `ESS Pro <https://element.io/server-suite>`_ - the commercial Matrix <https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
distribution from Element for professional use which can be used to customise its behaviour after installation.
- `ESS TI-M <https://element.io/server-suite/ti-messenger>`_ - a special version There are additional details on how to `configure Synapse for federation here
of ESS Pro focused on the requirements of TI-Messenger Pro and ePA as <https://element-hq.github.io/synapse/latest/federate.html>`_.
specified by the German National Digital Health Agency Gematik
.. _reverse-proxy:
Using a reverse proxy with Synapse
----------------------------------
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
For information on configuring one, see `the reverse proxy docs
<https://element-hq.github.io/synapse/latest/reverse_proxy.html>`_.
Upgrading an existing Synapse
-----------------------------
The instructions for upgrading Synapse are in `the upgrade notes`_.
Please check these instructions as upgrading may require extra steps for some
versions of Synapse.
.. _the upgrade notes: https://element-hq.github.io/synapse/develop/upgrade.html
🛠️ Standalone installation and configuration Platform dependencies
============================================ ---------------------
The Synapse documentation describes `options for installing Synapse standalone Synapse uses a number of platform dependencies such as Python and PostgreSQL,
<https://element-hq.github.io/synapse/latest/setup/installation.html>`_. See and aims to follow supported upstream versions. See the
below for more useful documentation links. `deprecation policy <https://element-hq.github.io/synapse/latest/deprecation_policy.html>`_
for more details.
- `Synapse configuration options <https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
- `Synapse configuration for federation <https://element-hq.github.io/synapse/latest/federate.html>`_
- `Using a reverse proxy with Synapse <https://element-hq.github.io/synapse/latest/reverse_proxy.html>`_
- `Upgrading Synapse <https://element-hq.github.io/synapse/develop/upgrade.html>`_
Security note
-------------
Matrix serves raw, user-supplied data in some APIs -- specifically the `content
repository endpoints`_.
.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
Whilst we make a reasonable effort to mitigate against XSS attacks (for
instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
domain hosting other web applications. This especially applies to sharing
the domain with Matrix web clients and other sensitive applications like
webmail. See
https://developer.github.com/changes/2014-04-25-user-content-security for more
information.
.. _CSP: https://github.com/matrix-org/synapse/pull/1021
Ideally, the homeserver should not simply be on a different subdomain, but on
a completely different `registered domain`_ (also known as top-level site or
eTLD+1). This is because `some attacks`_ are still possible as long as the two
applications share the same registered domain.
.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
To illustrate this with an example, if your Element Web or other sensitive web
application is hosted on ``A.example1.com``, you should ideally host Synapse on
``example2.com``. Some amount of protection is offered by hosting on
``B.example1.com`` instead, so this is also acceptable in some scenarios.
However, you should *not* host your Synapse on ``A.example1.com``.
Note that all of the above refers exclusively to the domain used in Synapse's
``public_baseurl`` setting. In particular, it has no bearing on the domain
mentioned in MXIDs hosted on that server.
Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.
🧪 Testing a new installation
=============================
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://element-hq.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/ecosystem/clients/>`_.
If all goes well you should at least be able to log in, create a room, and
start sending messages.
.. _`client-user-reg`:
Registering a new user from a client
------------------------------------
By default, registration of new users via Matrix clients is disabled. To enable
it:
1. In the
`registration config section <https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_
set ``enable_registration: true`` in ``homeserver.yaml``.
2. Then **either**:
a. set up a `CAPTCHA <https://element-hq.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.
We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
the public internet. Without it, anyone can freely register accounts on your homeserver.
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
Your new Matrix ID will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account in the form of::
@localpart:my.domain.name
(pronounced "at localpart on my dot domain dot name").
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'Username' box.
🎯 Troubleshooting and support 🎯 Troubleshooting and support
============================== ==============================
@@ -60,7 +182,7 @@ Enterprise quality support for Synapse including SLAs is available as part of an
`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription. `Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_ If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
and access the `Element product documentation <https://docs.element.io>`_. and access the `knowledge base <https://ems-docs.element.io>`_.
🤝 Community support 🤝 Community support
-------------------- --------------------
@@ -79,6 +201,35 @@ issues for support requests, only for bug reports and feature requests.
.. |docs| replace:: ``docs`` .. |docs| replace:: ``docs``
.. _docs: docs .. _docs: docs
🪪 Identity Servers
===================
Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.
**Identity servers do not store accounts or credentials - these are stored and managed on homeservers.
Identity Servers are just for mapping 3rd Party IDs to Matrix IDs.**
This process is highly security-sensitive, as there is an obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
the role of managing trusted identity in the Matrix ecosystem is farmed out to
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
is purely to authenticate and track 3PID logins and publish end-user public
keys.
You can host your own copy of Sydent, but this will prevent you reaching other
users in the Matrix ecosystem via their email address, and prevent them finding
you. We therefore recommend that you use one of the centralised identity servers
at ``https://matrix.org`` or ``https://vector.im`` for now.
To reiterate: the Identity server will only be used if you choose to associate
an email address with your account, or send an invite to another user via their
email address.
🛠️ Development 🛠️ Development
============== ==============
@@ -101,29 +252,20 @@ Alongside all that, join our developer community on Matrix:
Copyright and Licensing Copyright and Licensing
======================= =======================
| Copyright 20142017 OpenMarket Ltd | Copyright 2014-2017 OpenMarket Ltd
| Copyright 2017 Vector Creations Ltd | Copyright 2017 Vector Creations Ltd
| Copyright 20172025 New Vector Ltd | Copyright 2017-2025 New Vector Ltd
| Copyright 2025 Element Creations Ltd |
This software is dual-licensed by Element Creations Ltd (Element). It can be This software is dual-licensed by New Vector Ltd (Element). It can be used either:
used either:
(1) for free under the terms of the GNU Affero General Public License (as (1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version); OR
(2) under the terms of a paid-for Element Commercial License agreement between (2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to).
you and Element (the terms of which may vary depending on what you and
Element have agreed to).
Unless required by applicable law or agreed to in writing, software distributed Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses.
under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the
specific language governing permissions and limitations under the Licenses.
Please contact `licensing@element.io <mailto:licensing@element.io>`_ to purchase Please contact `licensing@element.io <mailto:licensing@element.io>`_ to purchase an Element commercial license for this software.
an Element commercial license for this software.
.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success .. |support| image:: https://img.shields.io/badge/matrix-community%20support-success

View File

@@ -1 +0,0 @@
Group together dependabot update PRs to reduce the review load.

View File

@@ -1 +0,0 @@
Fix `HomeServer.shutdown()` failing if the homeserver hasn't been setup yet.

View File

@@ -1 +0,0 @@
Fix sliding sync performance slow down for long lived connections.

1
changelog.d/19209.misc Normal file
View File

@@ -0,0 +1 @@
Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable).

View File

@@ -1 +0,0 @@
Respond with useful error codes with `Content-Length` header/s are invalid.

View File

@@ -1 +0,0 @@
Fix a bug where Mastodon posts (and possibly other embeds) have the wrong description for URL previews.

View File

@@ -1 +0,0 @@
Fix `HomeServer.shutdown()` failing if the homeserver failed to `start`.

View File

@@ -1 +0,0 @@
Switch the build backend from `poetry-core` to `maturin`.

View File

@@ -1 +0,0 @@
Raise the limit for concurrently-open non-security @dependabot PRs from 5 to 10.

View File

@@ -1 +0,0 @@
Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden.

View File

@@ -1 +0,0 @@
Require 14 days to pass before pulling in general dependency updates to help mitigate upstream supply chain attacks.

View File

@@ -1 +0,0 @@
Add `memberships` endpoint to the admin API. This is useful for forensics and T&S purpose.

View File

@@ -1 +0,0 @@
Drop the broken netlify documentation workflow until a new one is implemented.

View File

@@ -1 +0,0 @@
Fix bug where `Duration` was logged incorrectly.

View File

@@ -1 +0,0 @@
Add an admin API for retrieving a paginated list of quarantined media.

View File

@@ -1 +0,0 @@
Document the importance of `public_baseurl` when configuring OpenID Connect authentication.

View File

@@ -1 +0,0 @@
Fix bug introduced in 1.143.0 that broke support for versions of `zope-interface` older than 6.2.

View File

@@ -1 +0,0 @@
Server admins can bypass the quarantine media check when downloading media by setting the `admin_unsafely_bypass_quarantine` query parameter to `true` on Client-Server API media download requests.

View File

@@ -1 +0,0 @@
Don't include debug logs in `Clock` unless explicitly enabled.

View File

@@ -1 +0,0 @@
Implemented pagination for the [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) mutual rooms endpoint. Contributed by @tulir @ Beeper.

View File

@@ -1 +0,0 @@
Admin API: add worker support to `GET /_synapse/admin/v2/users/<user_id>`.

View File

@@ -1 +0,0 @@
Use `uv` to test olddeps to ensure all transitive dependencies use minimum versions.

View File

@@ -1 +0,0 @@
Improve proxy support for the `federation_client.py` dev script. Contributed by Denis Kasak (@dkasak).

View File

@@ -1 +0,0 @@
Unpin the version of Rust we use to build Synapse wheels (was 1.82.0) now that MacOS support has been dropped.

View File

@@ -1 +0,0 @@
Add experimental support for the [MSC4370](https://github.com/matrix-org/matrix-spec-proposals/pull/4370) Federation API `GET /extremities` endpoint.

18
debian/changelog vendored
View File

@@ -1,21 +1,3 @@
matrix-synapse-py3 (1.144.0) stable; urgency=medium
* New Synapse release 1.144.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 Dec 2025 08:30:40 -0700
matrix-synapse-py3 (1.144.0~rc1) stable; urgency=medium
* New Synapse release 1.144.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Dec 2025 09:11:19 -0700
matrix-synapse-py3 (1.143.0) stable; urgency=medium
* New Synapse release 1.143.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Nov 2025 08:44:56 -0700
matrix-synapse-py3 (1.143.0~rc2) stable; urgency=medium matrix-synapse-py3 (1.143.0~rc2) stable; urgency=medium
* New Synapse release 1.143.0rc2. * New Synapse release 1.143.0rc2.

View File

@@ -196,7 +196,6 @@ WORKERS_CONFIG: dict[str, dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload",
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$",
"^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$",
], ],
"shared_extra_conf": {}, "shared_extra_conf": {},
"worker_extra_conf": "", "worker_extra_conf": "",

View File

@@ -5,7 +5,6 @@
# Setup # Setup
- [Installation](setup/installation.md) - [Installation](setup/installation.md)
- [Security](setup/security.md)
- [Using Postgres](postgres.md) - [Using Postgres](postgres.md)
- [Configuring a Reverse Proxy](reverse_proxy.md) - [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md) - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)

View File

@@ -73,33 +73,6 @@ Response:
} }
``` ```
## Listing all quarantined media
This API returns a list of all quarantined media on the server. It is paginated, and can be scoped to either local or
remote media. Note that the pagination values are also scoped to the request parameters - changing them but keeping the
same pagination values will result in unexpected results.
Request:
```http
GET /_synapse/admin/v1/media/quarantined?from=0&limit=100&kind=local
```
`from` and `limit` are optional parameters, and default to `0` and `100` respectively. They are the row index and number
of rows to return - they are not timestamps.
`kind` *MUST* either be `local` or `remote`.
The API returns a JSON body containing MXC URIs for the quarantined media, like the following:
```json
{
"media": [
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
"mxc://localhost/abcdefghijklmnopqrstuvwx"
]
}
```
# Quarantine media # Quarantine media
Quarantining media means that it is marked as inaccessible by users. It applies Quarantining media means that it is marked as inaccessible by users. It applies
@@ -115,20 +88,6 @@ is quarantined, Synapse will:
- Quarantine any existing cached remote media. - Quarantine any existing cached remote media.
- Quarantine any future remote media. - Quarantine any future remote media.
## Downloading quarantined media
Normally, when media is quarantined, it will return a 404 error when downloaded.
Admins can bypass this by adding `?admin_unsafely_bypass_quarantine=true`
to the [normal download URL](https://spec.matrix.org/v1.16/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid).
Bypassing the quarantine check is not recommended. Media is typically quarantined
to prevent harmful content from being served to users, which includes admins. Only
set the bypass parameter if you intentionally want to access potentially harmful
content.
Non-admin users cannot bypass quarantine checks, even when specifying the above
query parameter.
## Quarantining media by ID ## Quarantining media by ID
This API quarantines a single piece of local or remote media. This API quarantines a single piece of local or remote media.

View File

@@ -505,55 +505,6 @@ with a body of:
} }
``` ```
## List room memberships of a user
Gets a list of room memberships for a specific `user_id`. This
endpoint differs from
[`GET /_synapse/admin/v1/users/<user_id>/joined_rooms`](#list-joined-rooms-of-a-user)
in that it returns rooms with memberships other than "join".
The API is:
```
GET /_synapse/admin/v1/users/<user_id>/memberships
```
A response body like the following is returned:
```json
{
"memberships": {
"!DuGcnbhHGaSZQoNQR:matrix.org": "join",
"!ZtSaPCawyWtxfWiIy:matrix.org": "leave",
}
}
```
which is a list of room membership states for the given user. This endpoint can
be used with both local and remote users, with the caveat that the homeserver will
only be aware of the memberships for rooms that one of its local users has joined.
Remote user memberships may also be out of date if all local users have since left
a room. The homeserver will thus no longer receive membership updates about it.
The list includes rooms that the user has since left; other membership states (knock,
invite, etc.) are also possible.
Note that rooms will only disappear from this list if they are
[purged](./rooms.md#delete-room-api) from the homeserver.
**Parameters**
The following parameters should be set in the URL:
- `user_id` - fully qualified: for example, `@user:server.com`.
**Response**
The following fields are returned in the JSON response body:
- `memberships` - A map of `room_id` (string) to `membership` state (string).
## List joined rooms of a user ## List joined rooms of a user
Gets a list of all `room_id` that a specific `user_id` is joined to and is a member of (participating in). Gets a list of all `room_id` that a specific `user_id` is joined to and is a member of (participating in).

View File

@@ -50,11 +50,6 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers. the text below for example configurations for specific providers.
For setups using [`.well-known` delegation](delegate.md), make sure
[`public_baseurl`](usage/configuration/config_documentation.md#public_baseurl) is set
appropriately. If unset, Synapse defaults to `https://<server_name>/` which is used in
the OIDC callback URL.
## OIDC Back-Channel Logout ## OIDC Back-Channel Logout
Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications. Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.

View File

@@ -16,15 +16,8 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see `user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](../federate.md). [Setting up Federation](../federate.md).
⚠️ Before setting up Synapse please consult the [security page](security.md) for
best practices. ⚠️
## Installing Synapse ## Installing Synapse
Note: Synapse uses a number of platform dependencies such as Python and PostgreSQL,
and aims to follow supported upstream versions. See the [deprecation
policy](../deprecation_policy.md) for more details.
### Prebuilt packages ### Prebuilt packages
Prebuilt packages are available for a number of platforms. These are recommended Prebuilt packages are available for a number of platforms. These are recommended

View File

@@ -1,41 +0,0 @@
# Security
This page lays out security best-practices when running Synapse.
If you believe you have encountered a security issue, see our [Security
Disclosure Policy](https://element.io/en/security/security-disclosure-policy).
## Content repository
Matrix serves raw, user-supplied data in some APIs — specifically the [content
repository endpoints](https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid).
Whilst we make a reasonable effort to mitigate against XSS attacks (for
instance, by using [CSP](https://github.com/matrix-org/synapse/pull/1021)), a
Matrix homeserver should not be hosted on a domain hosting other web
applications. This especially applies to sharing the domain with Matrix web
clients and other sensitive applications like webmail. See
https://developer.github.com/changes/2014-04-25-user-content-security for more
information.
Ideally, the homeserver should not simply be on a different subdomain, but on a
completely different [registered
domain](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3)
(also known as top-level site or eTLD+1). This is because [some
attacks](https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie)
are still possible as long as the two applications share the same registered
domain.
To illustrate this with an example, if your Element Web or other sensitive web
application is hosted on `A.example1.com`, you should ideally host Synapse on
`example2.com`. Some amount of protection is offered by hosting on
`B.example1.com` instead, so this is also acceptable in some scenarios.
However, you should *not* host your Synapse on `A.example1.com`.
Note that all of the above refers exclusively to the domain used in Synapse's
`public_baseurl` setting. In particular, it has no bearing on the domain
mentioned in MXIDs hosted on that server.
Following this advice ensures that even if an XSS is found in Synapse, the
impact to other applications will be minimal.

View File

@@ -117,25 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status). [the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.144.0
## Worker support for unstable MSC4140 `/restart` endpoint
The following unstable endpoint pattern may now be routed to worker processes:
```
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events/.*/restart$
```
## Unstable mutual rooms endpoint is now behind an experimental feature flag
The unstable mutual rooms endpoint from
[MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)
(`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) is now
disabled by default. If you rely on this unstable endpoint, you must now set
`experimental_features.msc2666_enabled: true` in your configuration to keep
using it.
# Upgrading to v1.143.0 # Upgrading to v1.143.0
## Dropping support for PostgreSQL 13 ## Dropping support for PostgreSQL 13

View File

@@ -255,8 +255,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$ ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$ ^/_matrix/client/(r0|v3|unstable)/capabilities$
^/_matrix/client/(r0|v3|unstable)/notifications$ ^/_matrix/client/(r0|v3|unstable)/notifications$
# Admin API requests
^/_synapse/admin/v1/rooms/[^/]+$ ^/_synapse/admin/v1/rooms/[^/]+$
# Encryption requests # Encryption requests
@@ -287,13 +285,10 @@ information.
# User directory search requests # User directory search requests
^/_matrix/client/(r0|v3|unstable)/user_directory/search$ ^/_matrix/client/(r0|v3|unstable)/user_directory/search$
# Unstable MSC4140 support
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$
Additionally, the following REST endpoints can be handled for GET requests: Additionally, the following REST endpoints can be handled for GET requests:
# Push rules requests
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
# Account data requests # Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags ^/_matrix/client/(r0|v3|unstable)/.*/tags
@@ -302,9 +297,6 @@ Additionally, the following REST endpoints can be handled for GET requests:
# Presence requests # Presence requests
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
# Admin API requests
^/_synapse/admin/v2/users/[^/]+$
Pagination requests can also be handled, but all requests for a given Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests ensure that the purge history admin API is not used while pagination requests

913
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[project] [project]
name = "matrix-synapse" name = "matrix-synapse"
version = "1.144.0" version = "1.143.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol" description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst" readme = "README.rst"
authors = [ authors = [
@@ -42,8 +42,7 @@ dependencies = [
"Twisted[tls]>=21.2.0", "Twisted[tls]>=21.2.0",
"treq>=21.5.0", "treq>=21.5.0",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6. # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
# pyOpenSSL 16.2.0 fixes compatibility with OpenSSL 1.1.0. "pyOpenSSL>=16.0.0",
"pyOpenSSL>=16.2.0",
"PyYAML>=5.3", "PyYAML>=5.3",
"pyasn1>=0.1.9", "pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7", "pyasn1-modules>=0.0.7",
@@ -96,25 +95,6 @@ dependencies = [
# This is used for parsing multipart responses # This is used for parsing multipart responses
"python-multipart>=0.0.9", "python-multipart>=0.0.9",
# Transitive dependency constraints
# These dependencies aren't directly required by Synapse.
# However, in order for Synapse to build, Synapse requires a higher minimum version
# for these dependencies than the minimum specified by the direct dependency.
# We should periodically check to see if these dependencies are still necessary and
# remove any that are no longer required.
"cffi>=1.15", # via cryptography
"pynacl>=1.3", # via signedjson
"pyparsing>=2.4", # via packaging
"pyrsistent>=0.18.0", # via jsonschema
"requests>=2.16.0", # 2.16.0+ no longer vendors urllib3, avoiding Python 3.10+ incompatibility
"urllib3>=1.26.5", # via treq; 1.26.5 fixes Python 3.10+ collections.abc compatibility
# 5.2 is the current version in Debian oldstable. If we don't care to support that, then 5.4 is
# the minimum version from Ubuntu 22.04 and RHEL 9. (as of 2025-12)
# When bumping this version to 6.2 or above, refer to https://github.com/element-hq/synapse/pull/19274
# for details of Synapse improvements that may be unlocked. Particularly around the use of `|`
# syntax with zope interface types.
"zope-interface>=5.2", # via twisted
] ]
[project.optional-dependencies] [project.optional-dependencies]
@@ -124,16 +104,7 @@ postgres = [
"psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'", "psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'", "psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
] ]
saml2 = [ saml2 = ["pysaml2>=4.5.0"]
"pysaml2>=4.5.0",
# Transitive dependencies from pysaml2
# These dependencies aren't directly required by Synapse.
# However, in order for Synapse to build, Synapse requires a higher minimum version
# for these dependencies than the minimum specified by the direct dependency.
"defusedxml>=0.7.1", # via pysaml2
"pytz>=2018.3", # via pysaml2
]
oidc = ["authlib>=0.15.1"] oidc = ["authlib>=0.15.1"]
# systemd-python is necessary for logging to the systemd journal via # systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in # `systemd.journal.JournalHandler`, as is documented in
@@ -141,25 +112,15 @@ oidc = ["authlib>=0.15.1"]
systemd = ["systemd-python>=231"] systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"] url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"] sentry = ["sentry-sdk>=0.7.2"]
opentracing = [ opentracing = ["jaeger-client>=4.2.0", "opentracing>=2.2.0"]
"jaeger-client>=4.2.0",
"opentracing>=2.2.0",
# Transitive dependencies from jaeger-client
# These dependencies aren't directly required by Synapse.
# However, in order for Synapse to build, Synapse requires a higher minimum version
# for these dependencies than the minimum specified by the direct dependency.
"thrift>=0.10", # via jaeger-client
"tornado>=6.0", # via jaeger-client
]
jwt = ["authlib"] jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster. # hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.) # (if it is not installed, we fall back to slow code.)
redis = ["txredisapi>=1.4.7", "hiredis>=0.3"] redis = ["txredisapi>=1.4.7", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option. # Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler>=1.0"] cache-memory = ["pympler"]
# If this is updated, don't forget to update the equivalent lines in # If this is updated, don't forget to update the equivalent lines in
# `dependency-groups.dev` below. # tool.poetry.group.dev.dependencies.
test = ["parameterized>=0.9.0", "idna>=3.3"] test = ["parameterized>=0.9.0", "idna>=3.3"]
# The duplication here is awful. # The duplication here is awful.
@@ -188,22 +149,12 @@ all = [
# opentracing # opentracing
"jaeger-client>=4.2.0", "opentracing>=2.2.0", "jaeger-client>=4.2.0", "opentracing>=2.2.0",
# redis # redis
"txredisapi>=1.4.7", "hiredis>=0.3", "txredisapi>=1.4.7", "hiredis",
# cache-memory # cache-memory
# 1.0 added support for python 3.10, our current minimum supported python version "pympler",
"pympler>=1.0",
# omitted: # omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job # - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement # - systemd: this is a system-based requirement
# Transitive dependencies
# These dependencies aren't directly required by Synapse.
# However, in order for Synapse to build, Synapse requires a higher minimum version
# for these dependencies than the minimum specified by the direct dependency.
"defusedxml>=0.7.1", # via pysaml2
"pytz>=2018.3", # via pysaml2
"thrift>=0.10", # via jaeger-client
"tornado>=6.0", # via jaeger-client
] ]
[project.urls] [project.urls]
@@ -226,85 +177,6 @@ synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main" update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry]
packages = [{ include = "synapse" }]
[tool.poetry.build]
# Compile our rust module when using `poetry install`. This is still required
# while using `poetry` as the build frontend. Saves the developer from needing
# to run both:
#
# $ poetry install
# $ maturin develop
script = "build_rust.py"
# Create a `setup.py` file which will call the `build` method in our build
# script.
#
# Our build script currently uses the "old" build method, where we define a
# `build` method and `setup.py` calls it. Poetry developers have mentioned that
# this will eventually be removed:
# https://github.com/matrix-org/synapse/pull/14949#issuecomment-1418001859
#
# The new build method is defined here:
# https://python-poetry.org/docs/building-extension-modules/#maturin-build-script
# but is still marked as "unstable" at the time of writing. This would also
# bump our minimum `poetry-core` version to 1.5.0.
#
# We can just drop this work-around entirely if migrating away from
# Poetry, thus there's little motivation to update the build script.
generate-setup-file = true
# Dependencies used for developing Synapse itself.
#
# Hold off on migrating these to `dev-dependencies` (PEP 735) for now until
# Poetry 2.2.0+, pip 25.1+ are more widely available.
[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.14.6"
# Typechecking
lxml-stubs = ">=0.4.0"
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
types-jsonschema = ">=3.2.0"
types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
types-pyOpenSSL = ">=20.0.7"
types-PyYAML = ">=5.4.10"
types-requests = ">=2.26.0"
types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# If this is updated, don't forget to update the equivalent lines in
# project.optional-dependencies.test.
parameterized = ">=0.9.0"
idna = ">=3.3"
# The following are used by the release script
click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
markdown-it-py = ">=3.0.0"
pygithub = ">=1.59"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
# Used for checking the schema delta files
sqlglot = ">=28.0.0"
[tool.towncrier] [tool.towncrier]
package = "synapse" package = "synapse"
@@ -388,12 +260,15 @@ select = [
"G", "G",
# pyupgrade # pyupgrade
"UP006", "UP006",
"UP007",
"UP045",
] ]
extend-safe-fixes = [ extend-safe-fixes = [
# pyupgrade rules compatible with Python >= 3.9 # pyupgrade rules compatible with Python >= 3.9
"UP006", "UP006",
# Allow ruff to automatically fix trailing spaces within a multi-line string/comment. "UP007",
"W293" # pyupgrade rules compatible with Python >= 3.10
"UP045",
] ]
[tool.ruff.lint.isort] [tool.ruff.lint.isort]
@@ -414,29 +289,85 @@ line-ending = "auto"
[tool.maturin] [tool.maturin]
manifest-path = "rust/Cargo.toml" manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust" module-name = "synapse.synapse_rust"
python-source = "."
sdist-include = [ [tool.poetry]
"AUTHORS.rst", packages = [
"book.toml", { include = "synapse" },
"changelog.d",
"CHANGES.md",
"CONTRIBUTING.md",
"demo",
"docs",
"INSTALL.md",
"mypy.ini",
"scripts-dev",
"synmark",
"sytest-blacklist",
"tests",
"UPGRADE.rst",
"Cargo.toml",
"Cargo.lock",
"rust/Cargo.toml",
"rust/build.rs",
"rust/src/**",
] ]
sdist-exclude = ["synapse/*.so"] include = [
{ path = "AUTHORS.rst", format = "sdist" },
{ path = "book.toml", format = "sdist" },
{ path = "changelog.d", format = "sdist" },
{ path = "CHANGES.md", format = "sdist" },
{ path = "CONTRIBUTING.md", format = "sdist" },
{ path = "demo", format = "sdist" },
{ path = "docs", format = "sdist" },
{ path = "INSTALL.md", format = "sdist" },
{ path = "mypy.ini", format = "sdist" },
{ path = "scripts-dev", format = "sdist" },
{ path = "synmark", format="sdist" },
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
{ path = "Cargo.toml", format = "sdist" },
{ path = "Cargo.lock", format = "sdist" },
{ path = "rust/Cargo.toml", format = "sdist" },
{ path = "rust/build.rs", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
exclude = [
{ path = "synapse/*.so", format = "sdist"}
]
[tool.poetry.build]
script = "build_rust.py"
generate-setup-file = true
[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.14.5"
# Typechecking
lxml-stubs = ">=0.4.0"
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
types-jsonschema = ">=3.2.0"
types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
types-pyOpenSSL = ">=20.0.7"
types-PyYAML = ">=5.4.10"
types-requests = ">=2.26.0"
types-setuptools = ">=57.4.0"
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# If this is updated, don't forget to update the equivalent lines in
# project.optional-dependencies.test.
parameterized = ">=0.9.0"
idna = ">=3.3"
# The following are used by the release script
click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
markdown-it-py = ">=3.0.0"
pygithub = ">=1.59"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
[build-system] [build-system]
# The upper bounds here are defensive, intended to prevent situations like # The upper bounds here are defensive, intended to prevent situations like
@@ -445,8 +376,8 @@ sdist-exclude = ["synapse/*.so"]
# runtime errors caused by build system changes. # runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request, # We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes). # provided we check that it's safe to do so (i.e. that CI passes).
requires = ["maturin>=1.0,<2.0"] requires = ["poetry-core>=2.0.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
build-backend = "maturin" build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel] [tool.cibuildwheel]
@@ -462,8 +393,7 @@ build-backend = "maturin"
# We skip: # We skip:
# - free-threaded cpython builds: these are not currently supported. # - free-threaded cpython builds: these are not currently supported.
# - i686: We don't support 32-bit platforms. # - i686: We don't support 32-bit platforms.
# - *macosx*: we don't support building wheels for MacOS. skip = "cp3??t-* *i686*"
skip = "cp3??t-* *i686* *macosx*"
# Enable non-default builds. See the list of available options: # Enable non-default builds. See the list of available options:
# https://cibuildwheel.pypa.io/en/stable/options#enable # https://cibuildwheel.pypa.io/en/stable/options#enable
# #
@@ -471,6 +401,9 @@ skip = "cp3??t-* *i686* *macosx*"
enable = "pypy" enable = "pypy"
# We need a rust compiler. # We need a rust compiler.
#
# We temporarily pin Rust to 1.82.0 to work around
# https://github.com/element-hq/synapse/issues/17988
before-all = "sh .ci/before_build_wheel.sh" before-all = "sh .ci/before_build_wheel.sh"
environment= { PATH = "$PATH:$HOME/.cargo/bin" } environment= { PATH = "$PATH:$HOME/.cargo/bin" }
@@ -480,3 +413,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build" before-build = "rm -rf {project}/build"
build-frontend = "build" build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'" test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
[tool.cibuildwheel.linux]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
[tool.cibuildwheel.macos]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"

View File

@@ -1,56 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2025 Element Creations, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
use once_cell::sync::OnceCell;
use pyo3::{
types::{IntoPyDict, PyAnyMethods},
Bound, BoundObject, IntoPyObject, Py, PyAny, PyErr, PyResult, Python,
};
/// A reference to the `synapse.util.duration` module.
static DURATION: OnceCell<Py<PyAny>> = OnceCell::new();
/// Access to the `synapse.util.duration` module.
fn duration_module(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> {
Ok(DURATION
.get_or_try_init(|| py.import("synapse.util.duration").map(Into::into))?
.bind(py))
}
/// Mirrors the `synapse.util.duration.Duration` Python class.
pub struct SynapseDuration {
microseconds: u64,
}
impl SynapseDuration {
/// For now we only need to create durations from milliseconds.
pub fn from_milliseconds(milliseconds: u64) -> Self {
Self {
microseconds: milliseconds * 1_000,
}
}
}
impl<'py> IntoPyObject<'py> for &SynapseDuration {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
let duration_module = duration_module(py)?;
let kwargs = [("microseconds", self.microseconds)].into_py_dict(py)?;
let duration_instance = duration_module.call_method("Duration", (), Some(&kwargs))?;
Ok(duration_instance.into_bound())
}
}

View File

@@ -5,7 +5,6 @@ use pyo3::prelude::*;
use pyo3_log::ResetHandle; use pyo3_log::ResetHandle;
pub mod acl; pub mod acl;
pub mod duration;
pub mod errors; pub mod errors;
pub mod events; pub mod events;
pub mod http; pub mod http;

View File

@@ -35,7 +35,6 @@ use ulid::Ulid;
use self::session::Session; use self::session::Session;
use crate::{ use crate::{
duration::SynapseDuration,
errors::{NotFoundError, SynapseError}, errors::{NotFoundError, SynapseError},
http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt}, http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
UnwrapInfallible, UnwrapInfallible,
@@ -133,8 +132,6 @@ impl RendezvousHandler {
.unwrap_infallible() .unwrap_infallible()
.unbind(); .unbind();
let eviction_duration = SynapseDuration::from_milliseconds(eviction_interval);
// Construct a Python object so that we can get a reference to the // Construct a Python object so that we can get a reference to the
// evict method and schedule it to run. // evict method and schedule it to run.
let self_ = Py::new( let self_ = Py::new(
@@ -152,7 +149,7 @@ impl RendezvousHandler {
let evict = self_.getattr(py, "_evict")?; let evict = self_.getattr(py, "_evict")?;
homeserver.call_method0("get_clock")?.call_method( homeserver.call_method0("get_clock")?.call_method(
"looping_call", "looping_call",
(evict, &eviction_duration), (evict, eviction_interval),
None, None,
)?; )?;

View File

@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.144/synapse-config.schema.json $id: https://element-hq.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json
type: object type: object
properties: properties:
modules: modules:

View File

@@ -9,10 +9,15 @@ from typing import Any
import click import click
import git import git
import sqlglot
import sqlglot.expressions
SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
INDEX_CREATION_REGEX = re.compile(
r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE
)
INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE)
TABLE_CREATION_REGEX = re.compile(
r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE
)
# The base branch we want to check against. We use the main development branch # The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against. # on the assumption that is what we are developing against.
@@ -136,9 +141,6 @@ def main(force_colors: bool) -> None:
color=force_colors, color=force_colors,
) )
# Mark this run as not successful, but continue so that we report *all*
# errors.
return_code = 1
else: else:
     click.secho(
         f"All deltas are in the correct folder: {current_schema_version}!",
@@ -151,79 +153,31 @@ def main(force_colors: bool) -> None:
     # and delta files are also numbered in order.
     changed_delta_files.sort()

-    success = check_schema_delta(changed_delta_files, force_colors)
-    if not success:
-        return_code = 1
-
-    click.get_current_context().exit(return_code)
-
-
-def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool:
-    """Check that the given schema delta files do not create or drop indices
-    inappropriately.
-
-    Index creation is only allowed on tables created in the same set of deltas.
-    Index deletion is never allowed and should be done in background updates.
-
-    Returns:
-        True if all checks succeeded, False if at least one failed.
-    """
-    # The tables created in this delta
-    created_tables = set[str]()
-    # The indices created/dropped in this delta, each a tuple of (table_name, sql)
-    created_indices = list[tuple[str, str]]()
-    # The indices dropped in this delta, just the sql
-    dropped_indices = list[str]()
-
-    for delta_file in delta_files:
+    # Now check that we're not trying to create or drop indices. If we want to
+    # do that they should be in background updates. The exception is when we
+    # create indices on tables we've just created.
+    created_tables = set()
+    for delta_file in changed_delta_files:
         with open(delta_file) as fd:
-            delta_contents = fd.read()
+            delta_lines = fd.readlines()

-        # Assume the SQL dialect from the file extension, defaulting to Postgres.
-        sql_lang = "postgres"
-        if delta_file.endswith(".sqlite"):
-            sql_lang = "sqlite"
-
-        statements = sqlglot.parse(delta_contents, read=sql_lang)
-
-        for statement in statements:
-            if isinstance(statement, sqlglot.expressions.Create):
-                if statement.kind == "TABLE":
-                    assert isinstance(statement.this, sqlglot.expressions.Schema)
-                    assert isinstance(statement.this.this, sqlglot.expressions.Table)
-                    table_name = statement.this.this.name
-                    created_tables.add(table_name)
-                elif statement.kind == "INDEX":
-                    assert isinstance(statement.this, sqlglot.expressions.Index)
-                    table_name = statement.this.args["table"].name
-                    created_indices.append((table_name, statement.sql()))
-            elif isinstance(statement, sqlglot.expressions.Drop):
-                if statement.kind == "INDEX":
-                    dropped_indices.append(statement.sql())
-
-    success = True
-
-    for table_name, clause in created_indices:
-        if table_name not in created_tables:
-            click.secho(
-                f"Found delta with index creation for existing table: '{clause}'",
-                fg="red",
-                bold=True,
-                color=force_colors,
-            )
-            click.secho(
-                " ↪ These should be in background updates (or the table should be created in the same delta).",
-            )
-            success = False
-
-    for clause in dropped_indices:
-        click.secho(
-            f"Found delta with index deletion: '{clause}'",
-            fg="red",
-            bold=True,
-            color=force_colors,
-        )
+        for line in delta_lines:
+            # Strip SQL comments
+            line = line.split("--", maxsplit=1)[0]
+
+            # Check and track any tables we create
+            match = TABLE_CREATION_REGEX.search(line)
+            if match:
+                table_name = match.group(1)
+                created_tables.add(table_name)
+
+            # Check for dropping indices, these are always banned
+            match = INDEX_DELETION_REGEX.search(line)
+            if match:
+                clause = match.group()
+
+                click.secho(
+                    f"Found delta with index deletion: '{clause}' in {delta_file}",
+                    fg="red",
+                    bold=True,
+                    color=force_colors,
+                )
@@ -231,9 +185,27 @@ def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool:
                 click.secho(
                     " ↪ These should be in background updates.",
                 )
-        success = False
-
-    return success
+                return_code = 1
+
+            # Check for index creation, which is only allowed for tables we've
+            # created.
+            match = INDEX_CREATION_REGEX.search(line)
+            if match:
+                clause = match.group()
+                table_name = match.group(1)
+                if table_name not in created_tables:
+                    click.secho(
+                        f"Found delta with index creation for existing table: '{clause}' in {delta_file}",
+                        fg="red",
+                        bold=True,
+                        color=force_colors,
+                    )
+                    click.secho(
+                        " ↪ These should be in background updates (or the table should be created in the same delta).",
+                    )
+                    return_code = 1
+
+    click.get_current_context().exit(return_code)


 if __name__ == "__main__":
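For readers unfamiliar with sqlglot, the parser-based variant of this check (the side being removed above) boils down to walking the parsed statements and recording which tables a delta creates. A minimal, self-contained sketch, using a made-up delta and the same attribute accesses as the code above:

import sqlglot
import sqlglot.expressions as exp

# Hypothetical delta contents, for illustration only.
delta = """
CREATE TABLE foo (id BIGINT PRIMARY KEY, name TEXT);
CREATE INDEX foo_name_idx ON foo (name);
DROP INDEX bar_idx;
"""

created_tables: set[str] = set()
for statement in sqlglot.parse(delta, read="postgres"):
    if statement is None:
        continue
    if isinstance(statement, exp.Create) and statement.kind == "TABLE":
        created_tables.add(statement.this.this.name)
    elif isinstance(statement, exp.Create) and statement.kind == "INDEX":
        table_name = statement.this.args["table"].name
        ok = table_name in created_tables
        print("index on", table_name, "allowed" if ok else "needs a background update")
    elif isinstance(statement, exp.Drop) and statement.kind == "INDEX":
        print("index deletion always needs a background update:", statement.sql())

The regex-based variant on the other side of the diff trades this structural parsing for line-by-line pattern matching.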
@@ -72,12 +72,6 @@ For help on arguments to 'go test', run 'go help testflag'.
 EOF
 }

-# We use a function to wrap the script logic so that we can use `return` to exit early
-# if needed. This is particularly useful so that this script can be sourced by other
-# scripts without exiting the calling subshell (composable). This allows us to share
-# variables like `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` with other scripts.
-#
-# Returns an exit code of 0 on success, or 1 on failure.
 main() {
   # parse our arguments
   skip_docker_build=""
@@ -210,12 +204,21 @@ main() {
       echo_if_github "::endgroup::"
     fi
-    echo "Docker images built."
-  else
-    echo "Skipping Docker image build as requested."
   fi

+  if [ -n "$skip_complement_run" ]; then
+    echo "Skipping Complement run as requested."
+    return 0
+  fi
+
+  export COMPLEMENT_BASE_IMAGE=complement-synapse
+  if [ -n "$use_editable_synapse" ]; then
+    export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
+    export COMPLEMENT_HOST_MOUNTS="$editable_mount"
+  fi
+
+  extra_test_args=()
+
   test_packages=(
     ./tests/csapi
     ./tests
@@ -231,16 +234,6 @@ main() {
     ./tests/msc4306
   )

-  # Export the list of test packages as a space-separated environment variable, so other
-  # scripts can use it.
-  export SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES="${test_packages[@]}"
-
-  export COMPLEMENT_BASE_IMAGE=complement-synapse
-  if [ -n "$use_editable_synapse" ]; then
-    export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
-    export COMPLEMENT_HOST_MOUNTS="$editable_mount"
-  fi
-
   # Enable dirty runs, so tests will reuse the same container where possible.
   # This significantly speeds up tests, but increases the possibility of test pollution.
   export COMPLEMENT_ENABLE_DIRTY_RUNS=1
@@ -249,18 +242,8 @@ main() {
   # (The prefix is stripped off before reaching the container.)
   export COMPLEMENT_SHARE_ENV_PREFIX=PASS_

-  # * -count=1: Only run tests once, and disable caching for tests.
-  # * -v: Output test logs, even if those tests pass.
-  # * -tags=synapse_blacklist: Enable the `synapse_blacklist` build tag, which is
-  #   necessary for `runtime.Synapse` checks/skips to work in the tests
-  test_args=(
-    -v
-    -tags="synapse_blacklist"
-    -count=1
-  )
-
   # It takes longer than 10m to run the whole suite.
-  test_args+=("-timeout=60m")
+  extra_test_args+=("-timeout=60m")

   if [[ -n "$WORKERS" ]]; then
     # Use workers.
@@ -312,15 +295,11 @@ main() {
   # particularly tricky.
   export PASS_SYNAPSE_LOG_TESTING=1

-  if [ -n "$skip_complement_run" ]; then
-    echo "Skipping Complement run as requested."
-    return 0
-  fi
-
   # Run the tests!
-  echo "Running Complement with ${test_args[@]} $@ ${test_packages[@]}"
+  echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
   cd "$COMPLEMENT_DIR"
-  go test "${test_args[@]}" "$@" "${test_packages[@]}"
+  go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"
 }

 main "$@"
@@ -145,7 +145,7 @@ def request(
     print("Requesting %s" % dest, file=sys.stderr)

     s = requests.Session()
-    s.mount("matrix-federation://", MatrixConnectionAdapter(verify_tls=verify_tls))
+    s.mount("matrix-federation://", MatrixConnectionAdapter())

     headers: dict[str, str] = {
         "Authorization": authorization_headers[0],
@@ -267,17 +267,6 @@ def read_args_from_config(args: argparse.Namespace) -> None:

 class MatrixConnectionAdapter(HTTPAdapter):
-    """
-    A Matrix federation-aware HTTP Adapter.
-    """
-
-    verify_tls: bool
-    """whether to verify the remote server's TLS certificate."""
-
-    def __init__(self, verify_tls: bool = True) -> None:
-        self.verify_tls = verify_tls
-        super().__init__()
-
     def send(
         self,
         request: PreparedRequest,
@@ -291,7 +280,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
         assert isinstance(request.url, str)
         parsed = urlparse.urlsplit(request.url)
         server_name = parsed.netloc
-        well_known = self._get_well_known(parsed.netloc, verify_tls=self.verify_tls)
+        well_known = self._get_well_known(parsed.netloc)
         if well_known:
             server_name = well_known
@@ -329,21 +318,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
         print(
             f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
         )
-        if proxies:
-            scheme = parsed.scheme
-            if isinstance(scheme, bytes):
-                scheme = scheme.decode("utf-8")
-            proxy_for_scheme = proxies.get(scheme)
-            if proxy_for_scheme:
-                return self.proxy_manager_for(proxy_for_scheme).connection_from_host(
-                    host,
-                    port=port,
-                    scheme="https",
-                    pool_kwargs={"server_hostname": ssl_server_name},
-                )
-
         return self.poolmanager.connection_from_host(
             host,
             port=port,
@@ -394,7 +368,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
         return server_name, 8448, server_name

     @staticmethod
-    def _get_well_known(server_name: str, verify_tls: bool = True) -> str | None:
+    def _get_well_known(server_name: str) -> str | None:
         if ":" in server_name:
             # explicit port, or ipv6 literal. Either way, no .well-known
             return None
@@ -405,7 +379,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
         print(f"fetching {uri}", file=sys.stderr)
         try:
-            resp = requests.get(uri, verify=verify_tls)
+            resp = requests.get(uri)
             if resp.status_code != 200:
                 print("%s gave %i" % (uri, resp.status_code), file=sys.stderr)
                 return None
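The `_get_well_known` helper shown above implements server-name delegation from the Matrix server-server spec: fetch `https://<server_name>/.well-known/matrix/server` and, if it returns 200, use the `m.server` value instead of the original name. A rough standalone sketch (the `verify_tls` flag mirrors the parameter being added/removed in this diff):

import requests

def get_well_known(server_name: str, verify_tls: bool = True) -> str | None:
    uri = f"https://{server_name}/.well-known/matrix/server"
    try:
        resp = requests.get(uri, verify=verify_tls)
        if resp.status_code != 200:
            return None
        # e.g. {"m.server": "matrix.example.com:443"}
        return resp.json().get("m.server")
    except Exception:
        return None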
@@ -32,7 +32,7 @@ import time
 import urllib.request
 from os import path
 from tempfile import TemporaryDirectory
-from typing import Any
+from typing import Any, Match

 import attr
 import click
@@ -291,12 +291,6 @@ def _prepare() -> None:
     synapse_repo.git.add("-u")
     subprocess.run("git diff --cached", shell=True)

-    print(
-        "Consider any upcoming platform deprecations that should be mentioned in the changelog. (e.g. upcoming Python, PostgreSQL or SQLite deprecations)"
-    )
-    print(
-        "Platform deprecations should be mentioned at least 1 release prior to being unsupported."
-    )
     if click.confirm("Edit changelog?", default=False):
         click.edit(filename="CHANGES.md")
@@ -968,6 +962,10 @@ def generate_and_write_changelog(
     new_changes = new_changes.replace(
         "No significant changes.", f"No significant changes since {current_version}."
     )
+    new_changes += build_dependabot_changelog(
+        repo,
+        current_version,
+    )

     # Prepend changes to changelog
     with open("CHANGES.md", "r+") as f:
@@ -982,5 +980,49 @@ def generate_and_write_changelog(
     os.remove(filename)

+
+def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
+    """Summarise dependabot commits between `current_version` and `release_branch`.
+
+    Returns an empty string if there have been no such commits; otherwise outputs a
+    third-level markdown header followed by an unordered list."""
+    last_release_commit = repo.tag("v" + str(current_version)).commit
+    rev_spec = f"{last_release_commit.hexsha}.."
+    commits = list(git.objects.Commit.iter_items(repo, rev_spec))
+    messages = []
+    for commit in reversed(commits):
+        if commit.author.name == "dependabot[bot]":
+            message: str | bytes = commit.message
+            if isinstance(message, bytes):
+                message = message.decode("utf-8")
+            messages.append(message.split("\n", maxsplit=1)[0])
+
+    if not messages:
+        print(f"No dependabot commits in range {rev_spec}", file=sys.stderr)
+        return ""
+
+    messages.sort()
+
+    def replacer(match: Match[str]) -> str:
+        desc = match.group(1)
+        number = match.group(2)
+        return f"* {desc}. ([\\#{number}](https://github.com/element-hq/synapse/issues/{number}))"
+
+    for i, message in enumerate(messages):
+        messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message)
+    messages.insert(0, "### Updates to locked dependencies\n")
+    # Add an extra blank line to the bottom of the section
+    messages.append("")
+    return "\n".join(messages)
+
+
+@cli.command()
+@click.argument("since")
+def test_dependabot_changelog(since: str) -> None:
+    """Test building the dependabot changelog.
+
+    Summarises all dependabot commits between the SINCE tag and the current git HEAD."""
+    print(build_dependabot_changelog(git.Repo("."), version.Version(since)))
+

 if __name__ == "__main__":
     cli()
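To make the regex in `build_dependabot_changelog` concrete: it rewrites a dependabot commit subject of the form `<description> (#<PR number>)` into a markdown bullet with an issue link. A small sketch with a made-up commit subject:

import re

def to_bullet(subject: str) -> str:
    def replacer(match: re.Match[str]) -> str:
        desc, number = match.group(1), match.group(2)
        return f"* {desc}. ([\\#{number}](https://github.com/element-hq/synapse/issues/{number}))"
    return re.sub(r"(.*) \(#(\d+)\)$", replacer, subject)

# Hypothetical input/output:
# "Bump serde from 1.0.190 to 1.0.193 (#12345)" becomes
# "* Bump serde from 1.0.190 to 1.0.193. ([\#12345](https://github.com/element-hq/synapse/issues/12345))"
print(to_bullet("Bump serde from 1.0.190 to 1.0.193 (#12345)"))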
@@ -29,19 +29,6 @@ from typing import Final
 # the max size of a (canonical-json-encoded) event
 MAX_PDU_SIZE = 65536

-# The maximum allowed size of an HTTP request.
-# Other than media uploads, the biggest request we expect to see is a fully-loaded
-# /federation/v1/send request.
-#
-# The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
-# limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
-# json encoding); there is no specced limit to EDUs (see
-# https://github.com/matrix-org/matrix-doc/issues/3121).
-#
-# in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
-#
-MAX_REQUEST_SIZE = 200 * MAX_PDU_SIZE
-
 # Max/min size of ints in canonical JSON
 CANONICALJSON_MAX_INT = (2**53) - 1
 CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
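For reference, the arithmetic behind the `200 * MAX_PDU_SIZE` limit in the comment above (the same figure is re-introduced inline in `max_request_body_size` further down in this comparison):

MAX_PDU_SIZE = 65536
MAX_REQUEST_SIZE = 200 * MAX_PDU_SIZE
print(MAX_REQUEST_SIZE)                  # 13107200 bytes
print(MAX_REQUEST_SIZE / (1024 * 1024))  # 12.5 MiB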
@@ -320,10 +307,6 @@ class AccountDataTypes:
     MSC4155_INVITE_PERMISSION_CONFIG: Final = (
         "org.matrix.msc4155.invite_permission_config"
     )
-    # MSC4380: Invite blocking
-    MSC4380_INVITE_PERMISSION_CONFIG: Final = (
-        "org.matrix.msc4380.invite_permission_config"
-    )

     # Synapse-specific behaviour. See "Client-Server API Extensions" documentation
     # in Admin API for more information.
     SYNAPSE_ADMIN_CLIENT_CONFIG: Final = "io.element.synapse.admin_client_config"
@@ -137,7 +137,7 @@ class Codes(str, Enum):
     PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
     KEY_TOO_LARGE = "M_KEY_TOO_LARGE"

-    # Part of MSC4155/MSC4380
+    # Part of MSC4155
     INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"

     # Part of MSC4190
@@ -856,12 +856,6 @@ class HttpResponseException(CodeMessageException):
         return ProxiedRequestError(self.code, errmsg, errcode, j)

-class HomeServerNotSetupException(Exception):
-    """
-    Raised when an operation is attempted on the HomeServer before setup() has been called.
-    """
-
 class ShadowBanError(Exception):
     """
     Raised when a shadow-banned user attempts to perform an action.
@@ -27,7 +27,6 @@ from synapse.config.ratelimiting import RatelimitSettings
 from synapse.storage.databases.main import DataStore
 from synapse.types import Requester
 from synapse.util.clock import Clock
-from synapse.util.duration import Duration
 from synapse.util.wheel_timer import WheelTimer

 if TYPE_CHECKING:
@@ -101,7 +100,7 @@ class Ratelimiter:
         # and doesn't affect correctness.
         self._timer: WheelTimer[Hashable] = WheelTimer()

-        self.clock.looping_call(self._prune_message_counts, Duration(seconds=15))
+        self.clock.looping_call(self._prune_message_counts, 15 * 1000)

     def _get_key(self, requester: Requester | None, key: Hashable | None) -> Hashable:
         """Use the requester's MXID as a fallback key if no key is provided."""
@@ -36,13 +36,12 @@ from typing import (
     Awaitable,
     Callable,
     NoReturn,
-    Optional,
     cast,
 )
 from wsgiref.simple_server import WSGIServer

 from cryptography.utils import CryptographyDeprecationWarning
-from typing_extensions import ParamSpec, assert_never
+from typing_extensions import ParamSpec

 import twisted
 from twisted.internet import defer, error, reactor as _reactor
@@ -60,17 +59,12 @@ from twisted.python.threadpool import ThreadPool
 from twisted.web.resource import Resource

 import synapse.util.caches
-from synapse.api.constants import MAX_REQUEST_SIZE
+from synapse.api.constants import MAX_PDU_SIZE
 from synapse.app import check_bind_error
 from synapse.config import ConfigError
 from synapse.config._base import format_config_error
 from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import (
-    ListenerConfig,
-    ManholeConfig,
-    TCPListenerConfig,
-    UnixListenerConfig,
-)
+from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
 from synapse.crypto import context_factory
 from synapse.events.auto_accept_invites import InviteAutoAccepter
 from synapse.events.presence_router import load_legacy_presence_router
@@ -419,44 +413,13 @@ def listen_unix(
     ]

-class ListenerException(RuntimeError):
-    """
-    An exception raised when we fail to listen with the given `ListenerConfig`.
-
-    Attributes:
-        listener_config: The listener config that caused the exception.
-    """
-
-    def __init__(
-        self,
-        listener_config: ListenerConfig,
-    ):
-        listener_human_name = ""
-        port = ""
-        if isinstance(listener_config, TCPListenerConfig):
-            listener_human_name = "TCP port"
-            port = str(listener_config.port)
-        elif isinstance(listener_config, UnixListenerConfig):
-            listener_human_name = "unix socket"
-            port = listener_config.path
-        else:
-            assert_never(listener_config)
-
-        super().__init__(
-            "Failed to listen on %s (%s) with the given listener config: %s"
-            % (listener_human_name, port, listener_config)
-        )
-        self.listener_config = listener_config
-
-
 def listen_http(
     hs: "HomeServer",
     listener_config: ListenerConfig,
     root_resource: Resource,
     version_string: str,
     max_request_body_size: int,
-    context_factory: Optional[IOpenSSLContextFactory],
+    context_factory: IOpenSSLContextFactory | None,
     reactor: ISynapseReactor = reactor,
 ) -> list[Port]:
     """
@@ -484,7 +447,6 @@ def listen_http(
         hs=hs,
     )

-    try:
     if isinstance(listener_config, TCPListenerConfig):
         if listener_config.is_tls():
             # refresh_certificate should have been called before this.
@@ -506,11 +468,9 @@ def listen_http(
                 site,
                 reactor=reactor,
             )
-            logger.info(
-                "Synapse now listening on TCP port %d", listener_config.port
-            )
-        elif isinstance(listener_config, UnixListenerConfig):
+        logger.info("Synapse now listening on TCP port %d", listener_config.port)
+    else:
         ports = listen_unix(
             listener_config.path, listener_config.mode, site, reactor=reactor
         )
@@ -520,19 +480,6 @@ def listen_http(
             "Synapse now listening on Unix Socket at: %s",
             ports[0].getHost().name.decode("utf-8"),
         )
-        else:
-            assert_never(listener_config)
-    except Exception as exc:
-        # The Twisted interface says that "Users should not call this function
-        # themselves!" but this appears to be the correct/only way handle proper cleanup
-        # of the site when things go wrong. In the normal case, a `Port` is created
-        # which we can call `Port.stopListening()` on to do the same thing (but no
-        # `Port` is created when an error occurs).
-        #
-        # We use `site.stopFactory()` instead of `site.doStop()` as the latter assumes
-        # that `site.doStart()` was called (which won't be the case if an error occurs).
-        site.stopFactory()
-        raise ListenerException(listener_config) from exc

     return ports
@@ -896,8 +843,17 @@ def sdnotify(state: bytes) -> None:

 def max_request_body_size(config: HomeServerConfig) -> int:
     """Get a suitable maximum size for incoming HTTP requests"""
-    # Baseline default for any request that isn't configured in the homeserver config
-    max_request_size = MAX_REQUEST_SIZE
+    # Other than media uploads, the biggest request we expect to see is a fully-loaded
+    # /federation/v1/send request.
+    #
+    # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
+    # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
+    # json encoding); there is no specced limit to EDUs (see
+    # https://github.com/matrix-org/matrix-doc/issues/3121).
+    #
+    # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
+    #
+    max_request_size = 200 * MAX_PDU_SIZE

     # if we have a media repo enabled, we may need to allow larger uploads than that
     if config.media.can_load_media_repo:
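The removed listener code above leans on `typing_extensions.assert_never` for exhaustiveness checking over the listener-config union. A generic sketch of that pattern (the two config classes here are stand-ins, not Synapse's real `ListenerConfig` types):

from dataclasses import dataclass
from typing_extensions import assert_never

@dataclass
class TcpListener:
    port: int

@dataclass
class UnixListener:
    path: str

ListenerLike = TcpListener | UnixListener

def describe(cfg: ListenerLike) -> str:
    if isinstance(cfg, TcpListener):
        return f"TCP port {cfg.port}"
    elif isinstance(cfg, UnixListener):
        return f"unix socket {cfg.path}"
    else:
        # If a new variant is added to ListenerLike but not handled above, a
        # type checker reports this call as reachable; at runtime it raises.
        assert_never(cfg)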
@@ -24,7 +24,7 @@ import logging
 import os
 import sys
 import tempfile
-from typing import Mapping, Optional, Sequence
+from typing import Mapping, Sequence

 from twisted.internet import defer, task
@@ -291,7 +291,7 @@ def load_config(argv_options: list[str]) -> tuple[HomeServerConfig, argparse.Nam
 def create_homeserver(
     config: HomeServerConfig,
-    reactor: Optional[ISynapseReactor] = None,
+    reactor: ISynapseReactor | None = None,
 ) -> AdminCmdServer:
     """
     Create a homeserver instance for the Synapse admin command process.
@@ -21,7 +21,6 @@
 #
 import logging
 import sys
-from typing import Optional

 from twisted.web.resource import Resource
@@ -336,7 +335,7 @@ def load_config(argv_options: list[str]) -> HomeServerConfig:
 def create_homeserver(
     config: HomeServerConfig,
-    reactor: Optional[ISynapseReactor] = None,
+    reactor: ISynapseReactor | None = None,
 ) -> GenericWorkerServer:
     """
     Create a homeserver instance for the Synapse worker process.
@@ -22,7 +22,7 @@
 import logging
 import os
 import sys
-from typing import Iterable, Optional
+from typing import Iterable

 from twisted.internet.tcp import Port
 from twisted.web.resource import EncodingResourceWrapper, Resource
@@ -350,7 +350,7 @@ def load_or_generate_config(argv_options: list[str]) -> HomeServerConfig:
 def create_homeserver(
     config: HomeServerConfig,
-    reactor: Optional[ISynapseReactor] = None,
+    reactor: ISynapseReactor | None = None,
 ) -> SynapseHomeServer:
     """
     Create a homeserver instance for the Synapse main process.
@@ -30,20 +30,24 @@ from twisted.internet import defer
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.types import JsonDict
-from synapse.util.duration import Duration
+from synapse.util.constants import (
+    MILLISECONDS_PER_SECOND,
+    ONE_HOUR_SECONDS,
+    ONE_MINUTE_SECONDS,
+)

 if TYPE_CHECKING:
     from synapse.server import HomeServer

 logger = logging.getLogger("synapse.app.homeserver")

-INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5)
+INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
 """
 We wait 5 minutes to send the first set of stats as the server can be quite busy the
 first few minutes
 """

-PHONE_HOME_INTERVAL = Duration(hours=3)
+PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
 """
 Phone home stats are sent every 3 hours
 """
@@ -218,13 +222,13 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
     # table will decrease
     clock.looping_call(
         hs.get_datastores().main.generate_user_daily_visits,
-        Duration(minutes=5),
+        5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
     )

     # monthly active user limiting functionality
     clock.looping_call(
         hs.get_datastores().main.reap_monthly_active_users,
-        Duration(hours=1),
+        ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
     )
     hs.get_datastores().main.reap_monthly_active_users()
@@ -263,14 +267,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
     if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
         generate_monthly_active_users()
-        clock.looping_call(generate_monthly_active_users, Duration(minutes=5))
+        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
     # End of monthly active user settings

     if hs.config.metrics.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(
             phone_stats_home,
-            PHONE_HOME_INTERVAL,
+            PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
             hs,
             stats,
         )
@@ -278,14 +282,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
     # We need to defer this init for the cases that we daemonize
     # otherwise the process ID we get is that of the non-daemon process
     clock.call_later(
-        Duration(seconds=0),
+        0,
         performance_stats_init,
     )

     # We wait 5 minutes to send the first set of stats as the server can
     # be quite busy the first few minutes
     clock.call_later(
-        INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME,
+        INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS,
         phone_stats_home,
         hs,
         stats,
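Most of the changes in this comparison swap `synapse.util.duration.Duration` arguments for raw millisecond or second values (or back). The `Duration` implementation itself is not part of this diff, so the following is only an assumed sketch, inferred from its call sites here (keyword construction such as `Duration(minutes=5)`, and the `as_secs()` accessor used elsewhere in this comparison):

class Duration:
    """Assumed sketch only; not the real synapse.util.duration.Duration."""

    def __init__(
        self,
        *,
        days: float = 0,
        hours: float = 0,
        minutes: float = 0,
        seconds: float = 0,
        milliseconds: float = 0,
    ) -> None:
        total_seconds = ((days * 24 + hours) * 60 + minutes) * 60 + seconds
        self._ms = total_seconds * 1000 + milliseconds

    def as_secs(self) -> float:
        # The only accessor that appears in this diff.
        return self._ms / 1000

# e.g. Duration(minutes=5).as_secs() == 300.0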
@@ -77,7 +77,6 @@ from synapse.logging.context import run_in_background
 from synapse.storage.databases.main import DataStore
 from synapse.types import DeviceListUpdates, JsonMapping
 from synapse.util.clock import Clock, DelayedCallWrapper
-from synapse.util.duration import Duration

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -505,8 +504,8 @@ class _Recoverer:
         self.scheduled_recovery: DelayedCallWrapper | None = None

     def recover(self) -> None:
-        delay = Duration(seconds=2**self.backoff_counter)
-        logger.info("Scheduling retries on %s in %fs", self.service.id, delay.as_secs())
+        delay = 2**self.backoff_counter
+        logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
         self.scheduled_recovery = self.clock.call_later(
             delay,
             self.hs.run_as_background_process,
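The `_Recoverer` above doubles its delay on each attempt via `2**self.backoff_counter`. A quick illustration of the resulting schedule (whether and where Synapse caps this counter is not shown in this diff):

for backoff_counter in range(8):
    print(backoff_counter, 2**backoff_counter)
# delays of 1, 2, 4, 8, 16, 32, 64, 128 seconds, doubling on each retry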
@@ -672,8 +672,7 @@ class RootConfig:
             action="append",
             metavar="CONFIG_FILE",
             help="Specify config file. Can be given multiple times and"
-            " may specify directories containing *.yaml files."
-            " Top-level keys in later files overwrite ones in earlier files.",
+            " may specify directories containing *.yaml files.",
         )
         parser.add_argument(
             "--no-secrets-in-config",
@@ -3,7 +3,6 @@
 #
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 # Copyright (C) 2023 New Vector, Ltd
-# Copyright (C) 2025 Element Creations Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -439,9 +438,6 @@ class ExperimentalConfig(Config):
         # previously calculated push actions.
         self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)

-        # MSC2666: Query mutual rooms between two users.
-        self.msc2666_enabled: bool = experimental.get("msc2666_enabled", False)
-
         # MSC2815 (allow room moderators to view redacted event content)
         self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
@@ -534,9 +530,6 @@ class ExperimentalConfig(Config):
             "msc4108_delegation_endpoint", None
         )

-        # MSC4370: Get extremities federation endpoint
-        self.msc4370_enabled = experimental.get("msc4370_enabled", False)
-
         auth_delegated = self.msc3861.enabled or (
             config.get("matrix_authentication_service") or {}
         ).get("enabled", False)
@@ -600,6 +593,3 @@ class ExperimentalConfig(Config):
         # MSC4306: Thread Subscriptions
         # (and MSC4308: Thread Subscriptions extension to Sliding Sync)
         self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False)
-
-        # MSC4380: Invite blocking
-        self.msc4380_enabled: bool = experimental.get("msc4380_enabled", False)
@@ -21,7 +21,6 @@
 #
 import abc
 import logging
-from contextlib import ExitStack
 from typing import TYPE_CHECKING, Callable, Iterable

 import attr
@@ -151,40 +150,23 @@ class Keyring:
     """

     def __init__(
-        self,
-        hs: "HomeServer",
-        test_only_key_fetchers: "list[KeyFetcher] | None" = None,
+        self, hs: "HomeServer", key_fetchers: "Iterable[KeyFetcher] | None" = None
     ):
-        """
-        Args:
-            hs: The HomeServer instance
-            test_only_key_fetchers: Dependency injection for tests only. If provided,
-                these key fetchers will be used instead of the default ones.
-        """
-        # Clean-up to avoid partial initialization leaving behind references.
-        with ExitStack() as exit:
-            self.server_name = hs.hostname
-            self._key_fetchers: list[KeyFetcher] = []
-            if test_only_key_fetchers is None:
-                # Always fetch keys from the database.
-                store_key_fetcher = StoreKeyFetcher(hs)
-                exit.callback(store_key_fetcher.shutdown)
-                self._key_fetchers.append(store_key_fetcher)
-                # Fetch keys from configured trusted key servers, if any exist.
-                key_servers = hs.config.key.key_servers
-                if key_servers:
-                    perspectives_key_fetcher = PerspectivesKeyFetcher(hs)
-                    exit.callback(perspectives_key_fetcher.shutdown)
-                    self._key_fetchers.append(perspectives_key_fetcher)
-                # Finally, fetch keys from the origin server directly.
-                server_key_fetcher = ServerKeyFetcher(hs)
-                exit.callback(server_key_fetcher.shutdown)
-                self._key_fetchers.append(server_key_fetcher)
-            else:
-                self._key_fetchers = test_only_key_fetchers
+        self.server_name = hs.hostname
+        if key_fetchers is None:
+            # Always fetch keys from the database.
+            mutable_key_fetchers: list[KeyFetcher] = [StoreKeyFetcher(hs)]
+            # Fetch keys from configured trusted key servers, if any exist.
+            key_servers = hs.config.key.key_servers
+            if key_servers:
+                mutable_key_fetchers.append(PerspectivesKeyFetcher(hs))
+            # Finally, fetch keys from the origin server directly.
+            mutable_key_fetchers.append(ServerKeyFetcher(hs))
+            self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers)
+        else:
+            self._key_fetchers = key_fetchers

         self._fetch_keys_queue: BatchingQueue[
             _FetchKeyRequest, dict[str, dict[str, FetchKeyResult]]
@@ -195,7 +177,6 @@ class Keyring:
             # The method called to fetch each key
             process_batch_callback=self._inner_fetch_key_requests,
         )
-        exit.callback(self._fetch_keys_queue.shutdown)

         self._is_mine_server_name = hs.is_mine_server_name
@@ -213,19 +194,13 @@ class Keyring:
                 valid_until_ts=2**63,  # fake future timestamp
             )

-        # We reached the end of the block which means everything was successful, so
-        # no exit handlers are needed (remove them all).
-        exit.pop_all()
-
     def shutdown(self) -> None:
         """
         Prepares the KeyRing for garbage collection by shutting down it's queues.
         """
         self._fetch_keys_queue.shutdown()

         for key_fetcher in self._key_fetchers:
             key_fetcher.shutdown()
-        self._key_fetchers.clear()

     async def verify_json_for_server(
         self,
@@ -546,22 +521,10 @@ class StoreKeyFetcher(KeyFetcher):
     """KeyFetcher impl which fetches keys from our data store"""

     def __init__(self, hs: "HomeServer"):
-        # Clean-up to avoid partial initialization leaving behind references.
-        with ExitStack() as exit:
-            super().__init__(hs)
-            # `KeyFetcher` keeps a reference to `hs` which we need to clean up if
-            # something goes wrong so we can cleanly shutdown the homeserver.
-            exit.callback(super().shutdown)
-
-            # An error can be raised here if someone tried to create a `StoreKeyFetcher`
-            # before the homeserver is fully set up (`HomeServerNotSetupException:
-            # HomeServer.setup must be called before getting datastores`).
-            self.store = hs.get_datastores().main
-
-            # We reached the end of the block which means everything was successful, so
-            # no exit handlers are needed (remove them all).
-            exit.pop_all()
+        super().__init__(hs)
+        self.store = hs.get_datastores().main

     async def _fetch_keys(
         self, keys_to_fetch: list[_FetchKeyRequest]
     ) -> dict[str, dict[str, FetchKeyResult]]:
@@ -580,22 +543,10 @@ class StoreKeyFetcher(KeyFetcher):

 class BaseV2KeyFetcher(KeyFetcher):
     def __init__(self, hs: "HomeServer"):
-        # Clean-up to avoid partial initialization leaving behind references.
-        with ExitStack() as exit:
-            super().__init__(hs)
-            # `KeyFetcher` keeps a reference to `hs` which we need to clean up if
-            # something goes wrong so we can cleanly shutdown the homeserver.
-            exit.callback(super().shutdown)
-
-            # An error can be raised here if someone tried to create a `StoreKeyFetcher`
-            # before the homeserver is fully set up (`HomeServerNotSetupException:
-            # HomeServer.setup must be called before getting datastores`).
-            self.store = hs.get_datastores().main
-
-            # We reached the end of the block which means everything was successful, so
-            # no exit handlers are needed (remove them all).
-            exit.pop_all()
+        super().__init__(hs)
+        self.store = hs.get_datastores().main

     async def process_v2_response(
         self, from_server: str, response_json: JsonDict, time_added_ms: int
     ) -> dict[str, FetchKeyResult]:
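The removed constructors above all use the same `contextlib.ExitStack` idiom: register a cleanup callback after each partially-acquired resource, and discard the callbacks with `pop_all()` only once initialisation has fully succeeded. A self-contained sketch of the pattern (the `Resource` class here is just a stand-in):

from contextlib import ExitStack

class Resource:
    def __init__(self, name: str) -> None:
        self.name = name
    def close(self) -> None:
        print("closed", self.name)

class Component:
    def __init__(self, fail: bool = False) -> None:
        with ExitStack() as cleanup:
            self.a = Resource("a")
            cleanup.callback(self.a.close)
            self.b = Resource("b")
            cleanup.callback(self.b.close)
            if fail:
                raise RuntimeError("partial initialisation")
            # Reaching the end of the block means init succeeded, so drop the
            # registered callbacks instead of running them.
            cleanup.pop_all()

Component()               # nothing is closed
try:
    Component(fail=True)  # both resources are closed before the error propagates
except RuntimeError:
    pass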
@@ -548,7 +548,7 @@ class FrozenEventV4(FrozenEventV3):
         assert create_event_id not in self._dict["auth_events"]
         if self.type == EventTypes.Create and self.get_state_key() == "":
             return self._dict["auth_events"]  # should be []
-        return [*self._dict["auth_events"], create_event_id]
+        return self._dict["auth_events"] + [create_event_id]

 def _event_type_from_format_version(
@@ -75,7 +75,6 @@ from synapse.types import JsonDict, StrCollection, UserID, get_domain_from_id
 from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM
 from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.duration import Duration
 from synapse.util.retryutils import NotRetryingDestination

 if TYPE_CHECKING:
@@ -133,7 +132,7 @@ class FederationClient(FederationBase):
         super().__init__(hs)

         self.pdu_destination_tried: dict[str, dict[str, int]] = {}
-        self._clock.looping_call(self._clear_tried_cache, Duration(minutes=1))
+        self._clock.looping_call(self._clear_tried_cache, 60 * 1000)
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()
@@ -4,7 +4,6 @@
 # Copyright 2019-2021 Matrix.org Federation C.I.C
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright (C) 2023 New Vector, Ltd
-# Copyright (C) 2025 Element Creations Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -90,7 +89,6 @@ from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
 from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
 from synapse.util.caches.response_cache import ResponseCache
-from synapse.util.duration import Duration
 from synapse.util.stringutils import parse_server_name

 if TYPE_CHECKING:
@@ -228,7 +226,7 @@ class FederationServer(FederationBase):
             )

             # We pause a bit so that we don't start handling all rooms at once.
-            await self._clock.sleep(Duration(seconds=random.uniform(0, 0.1)))
+            await self._clock.sleep(random.uniform(0, 0.1))

     async def on_backfill_request(
         self, origin: str, room_id: str, versions: list[str], limit: int
@@ -303,9 +301,7 @@ class FederationServer(FederationBase):
         # Start a periodic check for old staged events. This is to handle
         # the case where locks time out, e.g. if another process gets killed
         # without dropping its locks.
-        self._clock.looping_call(
-            self._handle_old_staged_events, Duration(minutes=1)
-        )
+        self._clock.looping_call(self._handle_old_staged_events, 60 * 1000)

         # keep this as early as possible to make the calculated origin ts as
         # accurate as possible.
@@ -684,16 +680,6 @@ class FederationServer(FederationBase):
         resp = await self.registry.on_query(query_type, args)
         return 200, resp

-    async def on_get_extremities_request(self, origin: str, room_id: str) -> JsonDict:
-        origin_host, _ = parse_server_name(origin)
-        await self.check_server_matches_acl(origin_host, room_id)
-        await self._event_auth_handler.assert_host_in_room(room_id, origin)
-
-        extremities = await self.store.get_forward_extremities_for_room(room_id)
-        prev_event_ids = [e[0] for e in extremities]
-        return {"prev_events": prev_event_ids}
-
     async def on_make_join_request(
         self, origin: str, room_id: str, user_id: str, supported_versions: list[str]
     ) -> dict[str, Any]:
@@ -53,7 +53,6 @@ from synapse.federation.sender import AbstractFederationSender, FederationSender
 from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
 from synapse.replication.tcp.streams.federation import FederationStream
 from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
-from synapse.util.duration import Duration
 from synapse.util.metrics import Measure

 from .units import Edu
@@ -138,7 +137,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
             assert isinstance(queue, Sized)
             register(queue_name, queue=queue)

-        self.clock.looping_call(self._clear_queue, Duration(seconds=30))
+        self.clock.looping_call(self._clear_queue, 30 * 1000)

     def shutdown(self) -> None:
         """Stops this federation sender instance from sending further transactions."""
@@ -174,7 +174,6 @@ from synapse.types import (
     get_domain_from_id,
 )
 from synapse.util.clock import Clock
-from synapse.util.duration import Duration
 from synapse.util.metrics import Measure
 from synapse.util.retryutils import filter_destinations_by_retry_limiter
@@ -219,12 +218,12 @@ transaction_queue_pending_edus_gauge = LaterGauge(
 # Please note that rate limiting still applies, so while the loop is
 # executed every X seconds the destinations may not be woken up because
 # they are being rate limited following previous attempt failures.
-WAKEUP_RETRY_PERIOD = Duration(minutes=1)
+WAKEUP_RETRY_PERIOD_SEC = 60

-# Time to wait in between waking up each destination, i.e. one destination
+# Time (in s) to wait in between waking up each destination, i.e. one destination
 # will be woken up every <x> seconds until we have woken every destination
 # has outstanding catch-up.
-WAKEUP_INTERVAL_BETWEEN_DESTINATIONS = Duration(seconds=5)
+WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5

 class AbstractFederationSender(metaclass=abc.ABCMeta):
@@ -380,7 +379,7 @@ class _DestinationWakeupQueue:
             queue.attempt_new_transaction()

-            await self.clock.sleep(Duration(seconds=current_sleep_seconds))
+            await self.clock.sleep(current_sleep_seconds)

             if not self.queue:
                 break
@@ -469,7 +468,7 @@ class FederationSender(AbstractFederationSender):
         # Regularly wake up destinations that have outstanding PDUs to be caught up
         self.clock.looping_call_now(
             self.hs.run_as_background_process,
-            WAKEUP_RETRY_PERIOD,
+            WAKEUP_RETRY_PERIOD_SEC * 1000.0,
             "wake_destinations_needing_catchup",
             self._wake_destinations_needing_catchup,
         )
@@ -1162,4 +1161,4 @@ class FederationSender(AbstractFederationSender):
                 last_processed,
             )
             self.wake_destination(destination)
-            await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS)
+            await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC)
@@ -4,7 +4,6 @@
 # Copyright 2020 Sorunome
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 # Copyright (C) 2023 New Vector, Ltd
-# Copyright (C) 2025 Element Creations Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -34,7 +33,6 @@ from synapse.federation.transport.server.federation import (
     FederationMediaDownloadServlet,
     FederationMediaThumbnailServlet,
     FederationUnstableClientKeysClaimServlet,
-    FederationUnstableGetExtremitiesServlet,
 )
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import (
@@ -328,12 +326,6 @@ def register_servlets(
             if not hs.config.media.can_load_media_repo:
                 continue

-            if (
-                servletclass == FederationUnstableGetExtremitiesServlet
-                and not hs.config.experimental.msc4370_enabled
-            ):
-                continue
-
             servletclass(
                 hs=hs,
                 authenticator=authenticator,
@@ -3,7 +3,6 @@
 #
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 # Copyright (C) 2023 New Vector, Ltd
-# Copyright (C) 2025 Element Creations Ltd
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -274,22 +273,6 @@ class FederationQueryServlet(BaseFederationServerServlet):
         return await self.handler.on_query_request(query_type, args)

-class FederationUnstableGetExtremitiesServlet(BaseFederationServerServlet):
-    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc4370"
-    PATH = "/extremities/(?P<room_id>[^/]*)"
-    CATEGORY = "Federation requests"
-
-    async def on_GET(
-        self,
-        origin: str,
-        content: Literal[None],
-        query: dict[bytes, list[bytes]],
-        room_id: str,
-    ) -> tuple[int, JsonDict]:
-        result = await self.handler.on_get_extremities_request(origin, room_id)
-        return 200, result
-
-
 class FederationMakeJoinServlet(BaseFederationServerServlet):
     PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
     CATEGORY = "Federation requests"
@@ -901,7 +884,6 @@ FEDERATION_SERVLET_CLASSES: tuple[type[BaseFederationServlet], ...] = (
     FederationBackfillServlet,
     FederationTimestampLookupServlet,
     FederationQueryServlet,
-    FederationUnstableGetExtremitiesServlet,
     FederationMakeJoinServlet,
     FederationMakeLeaveServlet,
     FederationEventServlet,
@@ -28,7 +28,6 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
 from synapse.types import UserID
 from synapse.util import stringutils
 from synapse.util.async_helpers import delay_cancellation
-from synapse.util.duration import Duration

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -74,7 +73,7 @@ class AccountValidityHandler:
         # Check the renewal emails to send and send them every 30min.
         if hs.config.worker.run_background_tasks:
-            self.clock.looping_call(self._send_renewal_emails, Duration(minutes=30))
+            self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

     async def is_user_expired(self, user_id: str) -> bool:
         """Checks if a user has expired against third-party modules.
@@ -74,7 +74,6 @@ from synapse.storage.databases.main.registration import (
 from synapse.types import JsonDict, Requester, StrCollection, UserID
 from synapse.util import stringutils as stringutils
 from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.duration import Duration
 from synapse.util.msisdn import phone_number_to_msisdn
 from synapse.util.stringutils import base62_encode
 from synapse.util.threepids import canonicalise_email
@@ -243,7 +242,7 @@ class AuthHandler:
         if hs.config.worker.run_background_tasks:
             self._clock.looping_call(
                 run_as_background_process,
-                Duration(minutes=5),
+                5 * 60 * 1000,
                 "expire_old_sessions",
                 self.server_name,
                 self._expire_old_sessions,
@@ -13,7 +13,7 @@
 #
 import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING

 from twisted.internet.interfaces import IDelayedCall
@@ -42,7 +42,6 @@ from synapse.types import (
     UserID,
     create_requester,
 )
-from synapse.util.duration import Duration
 from synapse.util.events import generate_fake_event_id
 from synapse.util.metrics import Measure
 from synapse.util.sentinel import Sentinel
@@ -74,7 +73,7 @@ class DelayedEventsHandler:
             cfg=self._config.ratelimiting.rc_delayed_event_mgmt,
         )

-        self._next_delayed_event_call: Optional[IDelayedCall] = None
+        self._next_delayed_event_call: IDelayedCall | None = None

         # The current position in the current_state_delta stream
         self._event_pos: int | None = None
@@ -93,22 +92,20 @@ class DelayedEventsHandler:
             # Kick off again (without blocking) to catch any missed notifications
             # that may have fired before the callback was added.
             self._clock.call_later(
-                Duration(seconds=0),
+                0,
                 self.notify_new_event,
             )

-            # Now process any delayed events that are due to be sent.
-            #
-            # We set `reprocess_events` to True in case any events had been
-            # marked as processed, but had not yet actually been sent,
-            # before the homeserver stopped.
-            #
+            # Delayed events that are already marked as processed on startup might not have been
+            # sent properly on the last run of the server, so unmark them to send them again.
+            #
             # Caveat: this will double-send delayed events that successfully persisted, but failed
             # to be removed from the DB table of delayed events.
             # TODO: To avoid double-sending, scan the timeline to find which of these events were
             # already sent. To do so, must store delay_ids in sent events to retrieve them later.
+            await self._store.unprocess_delayed_events()
+
             events, next_send_ts = await self._store.process_timeout_delayed_events(
-                self._get_current_ts(), reprocess_events=True
+                self._get_current_ts()
             )

             if next_send_ts:
@@ -426,21 +423,16 @@ class DelayedEventsHandler:
         Raises:
             NotFoundError: if no matching delayed event could be found.
         """
-        assert self._is_master
         await self._delayed_event_mgmt_ratelimiter.ratelimit(
             None, request.getClientAddress().host
         )
+        await make_deferred_yieldable(self._initialized_from_db)

-        # Note: We don't need to wait on `self._initialized_from_db` here as the
-        # events that deals with are already marked as processed.
-        #
-        # `restart_delayed_events` will skip over such events entirely.
         next_send_ts = await self._store.restart_delayed_event(
             delay_id, self._get_current_ts()
         )

-        # Only the main process handles sending delayed events.
-        if self._is_master:
         if self._next_send_ts_changed(next_send_ts):
             self._schedule_next_at(next_send_ts)
@@ -509,17 +501,17 @@ class DelayedEventsHandler:
     def _schedule_next_at(self, next_send_ts: Timestamp) -> None:
         delay = next_send_ts - self._get_current_ts()
-        delay_duration = Duration(milliseconds=max(delay, 0))
+        delay_sec = delay / 1000 if delay > 0 else 0

         if self._next_delayed_event_call is None:
             self._next_delayed_event_call = self._clock.call_later(
-                delay_duration,
+                delay_sec,
                 self.hs.run_as_background_process,
                 "_send_on_timeout",
                 self._send_on_timeout,
             )
         else:
-            self._next_delayed_event_call.reset(delay_duration.as_secs())
+            self._next_delayed_event_call.reset(delay_sec)

     async def get_all_for_user(self, requester: Requester) -> list[JsonDict]:
         """Return all pending delayed events requested by the given user."""
@@ -71,7 +71,6 @@ from synapse.util import stringutils
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.cancellation import cancellable
-from synapse.util.duration import Duration
 from synapse.util.metrics import measure_func
 from synapse.util.retryutils import (
     NotRetryingDestination,
@@ -86,7 +85,7 @@ logger = logging.getLogger(__name__)
 DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages"
 MAX_DEVICE_DISPLAY_NAME_LEN = 100
-DELETE_STALE_DEVICES_INTERVAL = Duration(days=1)
+DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000

 def _check_device_name_length(name: str | None) -> None:
@@ -187,7 +186,7 @@ class DeviceHandler:
         ):
             self.clock.looping_call(
                 self.hs.run_as_background_process,
-                DELETE_STALE_DEVICES_INTERVAL,
+                DELETE_STALE_DEVICES_INTERVAL_MS,
                 desc="delete_stale_devices",
                 func=self._delete_stale_devices,
             )
@@ -916,7 +915,7 @@ class DeviceHandler:
         )

     DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000
-    DEVICE_MSGS_DELETE_SLEEP = Duration(milliseconds=100)
+    DEVICE_MSGS_DELETE_SLEEP_MS = 100

     async def _delete_device_messages(
         self,
@@ -942,7 +941,9 @@ class DeviceHandler:
             if from_stream_id is None:
                 return TaskStatus.COMPLETE, None, None

-            await self.clock.sleep(DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP)
+            await self.clock.sleep(
+                DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0
+            )

 class DeviceWriterHandler(DeviceHandler):
@@ -1468,7 +1469,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
         self._resync_retry_lock = Lock()

         self.clock.looping_call(
             self.hs.run_as_background_process,
-            Duration(seconds=30),
+            30 * 1000,
             func=self._maybe_retry_device_resync,
             desc="_maybe_retry_device_resync",
         )

View File

@@ -46,7 +46,6 @@ from synapse.types import (
) )
from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
from synapse.util.duration import Duration
from synapse.util.json import json_decoder from synapse.util.json import json_decoder
from synapse.util.retryutils import ( from synapse.util.retryutils import (
NotRetryingDestination, NotRetryingDestination,
@@ -1635,7 +1634,7 @@ class E2eKeysHandler:
# matrix.org has about 15M users in the e2e_one_time_keys_json table # matrix.org has about 15M users in the e2e_one_time_keys_json table
# (comprising 20M devices). We want this to take about a week, so we need # (comprising 20M devices). We want this to take about a week, so we need
# to do about one batch of 100 users every 4 seconds. # to do about one batch of 100 users every 4 seconds.
await self.clock.sleep(Duration(seconds=4)) await self.clock.sleep(4)
def _check_cross_signing_key( def _check_cross_signing_key(
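The pacing comment in the hunk above checks out with quick arithmetic: 15M users processed 100 at a time, with a 4-second sleep per batch, comes out to roughly a week.

    users = 15_000_000
    batch_size = 100
    sleep_per_batch_s = 4

    batches = users / batch_size                 # 150,000 batches
    total_days = batches * sleep_per_batch_s / 86_400
    print(round(total_days, 1))                  # ~6.9 days, i.e. about a week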

View File

@@ -72,7 +72,6 @@ from synapse.storage.invite_rule import InviteRule
from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types import JsonDict, StrCollection, get_domain_from_id
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.duration import Duration
from synapse.util.retryutils import NotRetryingDestination from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server from synapse.visibility import filter_events_for_server
@@ -1973,9 +1972,7 @@ class FederationHandler:
logger.warning( logger.warning(
"%s; waiting for %d ms...", e, e.retry_after_ms "%s; waiting for %d ms...", e, e.retry_after_ms
) )
await self.clock.sleep( await self.clock.sleep(e.retry_after_ms / 1000)
Duration(milliseconds=e.retry_after_ms)
)
# Success, no need to try the rest of the destinations. # Success, no need to try the rest of the destinations.
break break

View File

@@ -91,7 +91,6 @@ from synapse.types import (
) )
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.duration import Duration
from synapse.util.iterutils import batch_iter, partition, sorted_topologically from synapse.util.iterutils import batch_iter, partition, sorted_topologically
from synapse.util.retryutils import NotRetryingDestination from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr from synapse.util.stringutils import shortstr
@@ -1803,7 +1802,7 @@ class FederationEventHandler:
# the reactor. For large rooms let's yield to the reactor # the reactor. For large rooms let's yield to the reactor
# occasionally to ensure we don't block other work. # occasionally to ensure we don't block other work.
if (i + 1) % 1000 == 0: if (i + 1) % 1000 == 0:
await self._clock.sleep(Duration(seconds=0)) await self._clock.sleep(0)
# Also persist the new event in batches for similar reasons as above. # Also persist the new event in batches for similar reasons as above.
for batch in batch_iter(events_and_contexts_to_persist, 1000): for batch in batch_iter(events_and_contexts_to_persist, 1000):

View File

@@ -22,7 +22,7 @@
import logging import logging
import random import random
from http import HTTPStatus from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence from typing import TYPE_CHECKING, Any, Mapping, Sequence
from canonicaljson import encode_canonical_json from canonicaljson import encode_canonical_json
@@ -83,7 +83,6 @@ from synapse.types.state import StateFilter
from synapse.util import log_failure, unwrapFirstError from synapse.util import log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.duration import Duration
from synapse.util.json import json_decoder, json_encoder from synapse.util.json import json_decoder, json_encoder
from synapse.util.metrics import measure_func from synapse.util.metrics import measure_func
from synapse.visibility import get_effective_room_visibility_from_state from synapse.visibility import get_effective_room_visibility_from_state
@@ -111,7 +110,7 @@ class MessageHandler:
# The scheduled call to self._expire_event. None if no call is currently # The scheduled call to self._expire_event. None if no call is currently
# scheduled. # scheduled.
self._scheduled_expiry: Optional[IDelayedCall] = None self._scheduled_expiry: IDelayedCall | None = None
if not hs.config.worker.worker_app: if not hs.config.worker.worker_app:
self.hs.run_as_background_process( self.hs.run_as_background_process(
@@ -434,11 +433,14 @@ class MessageHandler:
# Figure out how many seconds we need to wait before expiring the event. # Figure out how many seconds we need to wait before expiring the event.
now_ms = self.clock.time_msec() now_ms = self.clock.time_msec()
delay = Duration(milliseconds=max(expiry_ts - now_ms, 0)) delay = (expiry_ts - now_ms) / 1000
logger.info( # callLater doesn't support negative delays, so trim the delay to 0 if we're
"Scheduling expiry for event %s in %.3fs", event_id, delay.as_secs() # in that case.
) if delay < 0:
delay = 0
logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay)
self._scheduled_expiry = self.clock.call_later( self._scheduled_expiry = self.clock.call_later(
delay, delay,
@@ -549,7 +551,7 @@ class EventCreationHandler:
"send_dummy_events_to_fill_extremities", "send_dummy_events_to_fill_extremities",
self._send_dummy_events_to_fill_extremities, self._send_dummy_events_to_fill_extremities,
), ),
Duration(minutes=5), 5 * 60 * 1000,
) )
self._message_handler = hs.get_message_handler() self._message_handler = hs.get_message_handler()
@@ -1010,7 +1012,7 @@ class EventCreationHandler:
if not ignore_shadow_ban and requester.shadow_banned: if not ignore_shadow_ban and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError() raise ShadowBanError()
room_version = None room_version = None
@@ -1513,7 +1515,7 @@ class EventCreationHandler:
and requester.shadow_banned and requester.shadow_banned
): ):
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError() raise ShadowBanError()
if event.is_state(): if event.is_state():
@@ -1955,12 +1957,6 @@ class EventCreationHandler:
room_alias_str = event.content.get("alias", None) room_alias_str = event.content.get("alias", None)
directory_handler = self.hs.get_directory_handler() directory_handler = self.hs.get_directory_handler()
if room_alias_str and room_alias_str != original_alias: if room_alias_str and room_alias_str != original_alias:
if not isinstance(room_alias_str, str):
raise SynapseError(
400,
"The alias must be of type string.",
Codes.INVALID_PARAM,
)
await self._validate_canonical_alias( await self._validate_canonical_alias(
directory_handler, room_alias_str, event.room_id directory_handler, room_alias_str, event.room_id
) )
@@ -1984,12 +1980,6 @@ class EventCreationHandler:
new_alt_aliases = set(alt_aliases) - set(original_alt_aliases) new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
if new_alt_aliases: if new_alt_aliases:
for alias_str in new_alt_aliases: for alias_str in new_alt_aliases:
if not isinstance(alias_str, str):
raise SynapseError(
400,
"Each alt_alias must be of type string.",
Codes.INVALID_PARAM,
)
await self._validate_canonical_alias( await self._validate_canonical_alias(
directory_handler, alias_str, event.room_id directory_handler, alias_str, event.room_id
) )

View File

@@ -21,31 +21,27 @@
import logging import logging
from typing import TYPE_CHECKING, cast from typing import TYPE_CHECKING, cast
import attr
from twisted.python.failure import Failure from twisted.python.failure import Failure
from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter from synapse.api.filtering import Filter
from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig
from synapse.handlers.relations import BundledAggregations
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace from synapse.logging.opentracing import trace
from synapse.rest.admin._base import assert_user_is_admin from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams.config import PaginationConfig from synapse.streams.config import PaginationConfig
from synapse.types import ( from synapse.types import (
JsonDict,
JsonMapping, JsonMapping,
Requester, Requester,
ScheduledTask, ScheduledTask,
StreamKeyType, StreamKeyType,
StreamToken,
TaskStatus, TaskStatus,
) )
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock from synapse.util.async_helpers import ReadWriteLock
from synapse.util.duration import Duration
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -73,58 +69,6 @@ PURGE_ROOM_ACTION_NAME = "purge_room"
SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room" SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class GetMessagesResult:
"""
Everything needed to serialize a `/messages` response.
"""
messages_chunk: list[EventBase]
"""
A list of room events.
- When the request is `Direction.FORWARDS`, events will be in the range:
`start_token` < x <= `end_token`, (ascending topological_order)
- When the request is `Direction.BACKWARDS`, events will be in the range:
`start_token` >= x > `end_token`, (descending topological_order)
Note that an empty chunk does not necessarily imply that no more events are
available. Clients should continue to paginate until no `end_token` property is returned.
"""
bundled_aggregations: dict[str, BundledAggregations]
"""
A map of event ID to the bundled aggregations for the events in the chunk.
If an event doesn't have any bundled aggregations, it may not appear in the map.
"""
state: list[EventBase] | None
"""
A list of state events relevant to showing the chunk. For example, if
lazy_load_members is enabled in the filter then this may contain the membership
events for the senders of events in the chunk.
Omitted from the response when `None`.
"""
start_token: StreamToken
"""
Token corresponding to the start of chunk. This will be the same as the value given
in `from` query parameter of the `/messages` request.
"""
end_token: StreamToken | None
"""
A token corresponding to the end of chunk. This token can be passed back to this
endpoint to request further events.
If no further events are available (either because we have reached the start of the
timeline, or because the user does not have permission to see any more events), this
property is omitted from the response.
"""
class PaginationHandler: class PaginationHandler:
"""Handles pagination and purge history requests. """Handles pagination and purge history requests.
@@ -172,7 +116,7 @@ class PaginationHandler:
self.clock.looping_call( self.clock.looping_call(
self.hs.run_as_background_process, self.hs.run_as_background_process,
Duration(milliseconds=job.interval), job.interval,
"purge_history_for_rooms_in_range", "purge_history_for_rooms_in_range",
self.purge_history_for_rooms_in_range, self.purge_history_for_rooms_in_range,
job.shortest_max_lifetime, job.shortest_max_lifetime,
@@ -473,7 +417,7 @@ class PaginationHandler:
as_client_event: bool = True, as_client_event: bool = True,
event_filter: Filter | None = None, event_filter: Filter | None = None,
use_admin_priviledge: bool = False, use_admin_priviledge: bool = False,
) -> GetMessagesResult: ) -> JsonDict:
"""Get messages in a room. """Get messages in a room.
Args: Args:
@@ -672,13 +616,10 @@ class PaginationHandler:
# In that case we do not return end, to tell the client # In that case we do not return end, to tell the client
# there is no need for further queries. # there is no need for further queries.
if not events: if not events:
return GetMessagesResult( return {
messages_chunk=[], "chunk": [],
bundled_aggregations={}, "start": await from_token.to_string(self.store),
state=None, }
start_token=from_token,
end_token=None,
)
if event_filter: if event_filter:
events = await event_filter.filter(events) events = await event_filter.filter(events)
@@ -694,13 +635,11 @@ class PaginationHandler:
# if after the filter applied there are no more events # if after the filter applied there are no more events
# return immediately - but there might be more in next_token batch # return immediately - but there might be more in next_token batch
if not events: if not events:
return GetMessagesResult( return {
messages_chunk=[], "chunk": [],
bundled_aggregations={}, "start": await from_token.to_string(self.store),
state=None, "end": await next_token.to_string(self.store),
start_token=from_token, }
end_token=next_token,
)
state = None state = None
if event_filter and event_filter.lazy_load_members and len(events) > 0: if event_filter and event_filter.lazy_load_members and len(events) > 0:
@@ -717,20 +656,38 @@ class PaginationHandler:
if state_ids: if state_ids:
state_dict = await self.store.get_events(list(state_ids.values())) state_dict = await self.store.get_events(list(state_ids.values()))
state = list(state_dict.values()) state = state_dict.values()
aggregations = await self._relations_handler.get_bundled_aggregations( aggregations = await self._relations_handler.get_bundled_aggregations(
events, user_id events, user_id
) )
return GetMessagesResult( time_now = self.clock.time_msec()
messages_chunk=events,
bundled_aggregations=aggregations, serialize_options = SerializeEventConfig(
state=state, as_client_event=as_client_event, requester=requester
start_token=from_token,
end_token=next_token,
) )
chunk = {
"chunk": (
await self._event_serializer.serialize_events(
events,
time_now,
config=serialize_options,
bundle_aggregations=aggregations,
)
),
"start": await from_token.to_string(self.store),
"end": await next_token.to_string(self.store),
}
if state:
chunk["state"] = await self._event_serializer.serialize_events(
state, time_now, config=serialize_options
)
return chunk
async def _shutdown_and_purge_room( async def _shutdown_and_purge_room(
self, self,
task: ScheduledTask, task: ScheduledTask,
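In the pagination hunks above, one side has get_messages return a structured result object and leaves JSON serialization to the REST layer, while the other builds the /messages response dict inside the handler. A minimal sketch of the structured-result pattern, with the optional "end" token omitted when there is nothing further to paginate (hypothetical names, not the actual Synapse types):

    from __future__ import annotations
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class MessagesResult:
        chunk: list[dict]
        start: str
        end: str | None = None

    def to_response(result: MessagesResult) -> dict:
        body = {"chunk": result.chunk, "start": result.start}
        # "end" is omitted when no further events are available, which tells
        # clients there is no need for further queries.
        if result.end is not None:
            body["end"] = result.end
        return body

    assert to_response(MessagesResult(chunk=[], start="t1")) == {"chunk": [], "start": "t1"}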

View File

@@ -121,7 +121,6 @@ from synapse.types import (
get_domain_from_id, get_domain_from_id,
) )
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.duration import Duration
from synapse.util.metrics import Measure from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer from synapse.util.wheel_timer import WheelTimer
@@ -204,7 +203,7 @@ EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
# Delay before a worker tells the presence handler that a user has stopped # Delay before a worker tells the presence handler that a user has stopped
# syncing. # syncing.
UPDATE_SYNCING_USERS = Duration(seconds=10) UPDATE_SYNCING_USERS_MS = 10 * 1000
assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
@@ -529,7 +528,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
self._set_state_client = ReplicationPresenceSetState.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs)
self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS) self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS)
hs.register_async_shutdown_handler( hs.register_async_shutdown_handler(
phase="before", phase="before",
@@ -582,7 +581,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
for (user_id, device_id), last_sync_ms in list( for (user_id, device_id), last_sync_ms in list(
self._user_devices_going_offline.items() self._user_devices_going_offline.items()
): ):
if now - last_sync_ms > UPDATE_SYNCING_USERS.as_millis(): if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
self._user_devices_going_offline.pop((user_id, device_id), None) self._user_devices_going_offline.pop((user_id, device_id), None)
self.send_user_sync(user_id, device_id, False, last_sync_ms) self.send_user_sync(user_id, device_id, False, last_sync_ms)
@@ -862,20 +861,20 @@ class PresenceHandler(BasePresenceHandler):
# The initial delay is to allow disconnected clients a chance to # The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline. # reconnect before we treat them as offline.
self.clock.call_later( self.clock.call_later(
Duration(seconds=30), 30,
self.clock.looping_call, self.clock.looping_call,
self._handle_timeouts, self._handle_timeouts,
Duration(seconds=5), 5000,
) )
# Presence information is persisted, whether or not it is being tracked # Presence information is persisted, whether or not it is being tracked
# internally. # internally.
if self._presence_enabled: if self._presence_enabled:
self.clock.call_later( self.clock.call_later(
Duration(minutes=1), 60,
self.clock.looping_call, self.clock.looping_call,
self._persist_unpersisted_changes, self._persist_unpersisted_changes,
Duration(minutes=1), 60 * 1000,
) )
presence_wheel_timer_size_gauge.register_hook( presence_wheel_timer_size_gauge.register_hook(
@@ -2431,7 +2430,7 @@ class PresenceFederationQueue:
_KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000 _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000
# How often to check if we can expire entries from the queue. # How often to check if we can expire entries from the queue.
_CLEAR_ITEMS_EVERY_MS = Duration(minutes=1) _CLEAR_ITEMS_EVERY_MS = 60 * 1000
def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler):
self._clock = hs.get_clock() self._clock = hs.get_clock()
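The presence hunks above delay the start of a periodic task: wait 30 seconds so disconnected clients can reconnect, then run the timeout sweep every 5 seconds. A rough equivalent using plain Twisted primitives, assuming a reactor is already running (a sketch, not the Synapse Clock API):

    from twisted.internet import reactor
    from twisted.internet.task import LoopingCall

    def handle_timeouts() -> None:
        print("checking for timed-out presence entries")

    loop = LoopingCall(handle_timeouts)
    # First run after 30 seconds, then repeat every 5 seconds.
    reactor.callLater(30, loop.start, 5)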

View File

@@ -34,7 +34,6 @@ from synapse.api.errors import (
from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester
from synapse.util.caches.descriptors import cached from synapse.util.caches.descriptors import cached
from synapse.util.duration import Duration
from synapse.util.stringutils import parse_and_validate_mxc_uri from synapse.util.stringutils import parse_and_validate_mxc_uri
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -584,7 +583,7 @@ class ProfileHandler:
# Do not actually update the room state for shadow-banned users. # Do not actually update the room state for shadow-banned users.
if requester.shadow_banned: if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
return return
room_ids = await self.store.get_rooms_for_user(target_user.to_string()) room_ids = await self.store.get_rooms_for_user(target_user.to_string())

View File

@@ -92,7 +92,6 @@ from synapse.types.state import StateFilter
from synapse.util import stringutils from synapse.util import stringutils
from synapse.util.async_helpers import concurrently_execute from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache from synapse.util.caches.response_cache import ResponseCache
from synapse.util.duration import Duration
from synapse.util.iterutils import batch_iter from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import parse_and_validate_server_name from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
@@ -1180,7 +1179,7 @@ class RoomCreationHandler:
if (invite_list or invite_3pid_list) and requester.shadow_banned: if (invite_list or invite_3pid_list) and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
# Allow the request to go through, but remove any associated invites. # Allow the request to go through, but remove any associated invites.
invite_3pid_list = [] invite_3pid_list = []

View File

@@ -66,7 +66,6 @@ from synapse.types import (
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room from synapse.util.distributor import user_left_room
from synapse.util.duration import Duration
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.server import HomeServer from synapse.server import HomeServer
@@ -643,7 +642,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if action == Membership.INVITE and requester.shadow_banned: if action == Membership.INVITE and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError() raise ShadowBanError()
key = (room_id,) key = (room_id,)
@@ -874,7 +873,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if target_id == self._server_notices_mxid: if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite_result: tuple[Codes, dict] | None = None block_invite_result = None
if ( if (
self._server_notices_mxid is not None self._server_notices_mxid is not None
@@ -1648,7 +1647,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if requester.shadow_banned: if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester. # We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(Duration(seconds=random.randint(1, 10))) await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError() raise ShadowBanError()
# We need to rate limit *before* we send out any 3PID invites, so we # We need to rate limit *before* we send out any 3PID invites, so we
@@ -2191,7 +2190,7 @@ class RoomForgetterHandler(StateDeltasHandler):
# We kick this off to pick up outstanding work from before the last restart. # We kick this off to pick up outstanding work from before the last restart.
self._clock.call_later( self._clock.call_later(
Duration(seconds=0), 0,
self.notify_new_event, self.notify_new_event,
) )
@@ -2233,7 +2232,7 @@ class RoomForgetterHandler(StateDeltasHandler):
# #
# We wait for a short time so that we don't "tight" loop just # We wait for a short time so that we don't "tight" loop just
# keeping the table up to date. # keeping the table up to date.
await self._clock.sleep(Duration(milliseconds=500)) await self._clock.sleep(0.5)
self.pos = self._store.get_room_max_stream_ordering() self.pos = self._store.get_room_max_stream_ordering()
await self._store.update_room_forgetter_stream_pos(self.pos) await self._store.update_room_forgetter_stream_pos(self.pos)

View File

@@ -17,7 +17,6 @@ import logging
from itertools import chain from itertools import chain
from typing import TYPE_CHECKING, AbstractSet, Mapping from typing import TYPE_CHECKING, AbstractSet, Mapping
import attr
from prometheus_client import Histogram from prometheus_client import Histogram
from typing_extensions import assert_never from typing_extensions import assert_never
@@ -63,7 +62,6 @@ from synapse.types.handlers.sliding_sync import (
HaveSentRoomFlag, HaveSentRoomFlag,
MutablePerConnectionState, MutablePerConnectionState,
PerConnectionState, PerConnectionState,
RoomLazyMembershipChanges,
RoomSyncConfig, RoomSyncConfig,
SlidingSyncConfig, SlidingSyncConfig,
SlidingSyncResult, SlidingSyncResult,
@@ -108,7 +106,7 @@ class SlidingSyncHandler:
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
self.is_mine_id = hs.is_mine_id self.is_mine_id = hs.is_mine_id
self.connection_store = SlidingSyncConnectionStore(self.clock, self.store) self.connection_store = SlidingSyncConnectionStore(self.store)
self.extensions = SlidingSyncExtensionHandler(hs) self.extensions = SlidingSyncExtensionHandler(hs)
self.room_lists = SlidingSyncRoomLists(hs) self.room_lists = SlidingSyncRoomLists(hs)
@@ -763,6 +761,8 @@ class SlidingSyncHandler:
!= Membership.JOIN, != Membership.JOIN,
filter_send_to_client=True, filter_send_to_client=True,
) )
# TODO: Filter out `EventTypes.CallInvite` in public rooms,
# see https://github.com/element-hq/synapse/issues/17359
# TODO: Handle timeline gaps (`get_timeline_gaps()`) # TODO: Handle timeline gaps (`get_timeline_gaps()`)
@@ -983,15 +983,14 @@ class SlidingSyncHandler:
# #
# Calculate the `StateFilter` based on the `required_state` for the room # Calculate the `StateFilter` based on the `required_state` for the room
required_state_filter = StateFilter.none() required_state_filter = StateFilter.none()
# The requested `required_state_map` with the lazy membership expanded and
# Keep track of which users' state we may need to fetch. We split this # `$ME` replaced with the user's ID. This allows us to see what membership we've
# into explicit users and lazy loaded users. # sent down to the client in the next request.
explicit_user_state = set() #
lazy_load_user_ids = set() # Make a copy so we can modify it. Still need to be careful to make a copy of
# the state key sets if we want to add/remove from them. We could make a deep
# Whether lazy-loading of room members is enabled. # copy but this saves us some work.
lazy_load_room_members = False expanded_required_state_map = dict(room_sync_config.required_state_map)
if room_membership_for_user_at_to_token.membership not in ( if room_membership_for_user_at_to_token.membership not in (
Membership.INVITE, Membership.INVITE,
Membership.KNOCK, Membership.KNOCK,
@@ -1039,6 +1038,7 @@ class SlidingSyncHandler:
else: else:
required_state_types: list[tuple[str, str | None]] = [] required_state_types: list[tuple[str, str | None]] = []
num_wild_state_keys = 0 num_wild_state_keys = 0
lazy_load_room_members = False
num_others = 0 num_others = 0
for ( for (
state_type, state_type,
@@ -1070,60 +1070,43 @@ class SlidingSyncHandler:
timeline_event.state_key timeline_event.state_key
) )
# The client needs to know the membership of everyone in
# the timeline we're returning.
lazy_load_user_ids.update(timeline_membership)
# Update the required state filter so we pick up the new # Update the required state filter so we pick up the new
# membership # membership
if limited or initial:
# If the timeline is limited, we only need to
# return the membership changes for people in
# the timeline.
for user_id in timeline_membership: for user_id in timeline_membership:
required_state_types.append( required_state_types.append(
(EventTypes.Member, user_id) (EventTypes.Member, user_id)
) )
else:
# For non-limited timelines we always return all
# membership changes. This is so that clients
# who have fetched the full membership list
# already can continue to maintain it for
# non-limited syncs.
#
# This assumes that for non-limited syncs there
# won't be many membership changes that wouldn't
# have been included already (this can only
# happen if membership state was rolled back due
# to state resolution anyway).
#
# `None` is a wildcard in the `StateFilter`
required_state_types.append((EventTypes.Member, None))
# Record the extra members we're returning. # Add an explicit entry for each user in the timeline
lazy_load_user_ids.update( #
state_key # Make a new set or copy of the state key set so we can
for event_type, state_key in room_state_delta_id_map # modify it without affecting the original
if event_type == EventTypes.Member # `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
) )
else: | timeline_membership
)
elif state_key == StateValues.ME:
num_others += 1 num_others += 1
required_state_types.append((state_type, user.to_string()))
# Replace `$ME` with the user's ID so we can deduplicate # Replace `$ME` with the user's ID so we can deduplicate
# when someone requests the same state with `$ME` or with # when someone requests the same state with `$ME` or with
# their user ID. # their user ID.
normalized_state_key = state_key #
if state_key == StateValues.ME: # Make a new set or copy of the state key set so we can
normalized_state_key = user.to_string() # modify it without affecting the original
# `required_state_map`
if state_type == EventTypes.Member: expanded_required_state_map[EventTypes.Member] = (
# Also track explicitly requested member state for expanded_required_state_map.get(
# lazy membership tracking. EventTypes.Member, set()
explicit_user_state.add(normalized_state_key)
required_state_types.append(
(state_type, normalized_state_key)
) )
| {user.to_string()}
)
else:
num_others += 1
required_state_types.append((state_type, state_key))
set_tag( set_tag(
SynapseTags.FUNC_ARG_PREFIX SynapseTags.FUNC_ARG_PREFIX
@@ -1141,10 +1124,6 @@ class SlidingSyncHandler:
required_state_filter = StateFilter.from_types(required_state_types) required_state_filter = StateFilter.from_types(required_state_types)
# Remove any explicitly requested user state from the lazy-loaded set,
# as we track them separately.
lazy_load_user_ids -= explicit_user_state
# We need this base set of info for the response so let's just fetch it along # We need this base set of info for the response so let's just fetch it along
# with the `required_state` for the room # with the `required_state` for the room
hero_room_state = [ hero_room_state = [
@@ -1172,22 +1151,6 @@ class SlidingSyncHandler:
# We can return all of the state that was requested if this was the first # We can return all of the state that was requested if this was the first
# time we've sent the room down this connection. # time we've sent the room down this connection.
room_state: StateMap[EventBase] = {} room_state: StateMap[EventBase] = {}
# Includes the state for the heroes if we need them (may contain other
# state as well).
hero_membership_state: StateMap[EventBase] = {}
# By default, we mark all `lazy_load_user_ids` as being sent down
# for the first time in this sync. We later check if we sent any of them
# down previously and update `returned_user_id_to_last_seen_ts_map` if
# we have.
returned_user_id_to_last_seen_ts_map = {}
if lazy_load_room_members:
returned_user_id_to_last_seen_ts_map = dict.fromkeys(lazy_load_user_ids)
new_connection_state.room_lazy_membership[room_id] = RoomLazyMembershipChanges(
returned_user_id_to_last_seen_ts_map=returned_user_id_to_last_seen_ts_map
)
if initial: if initial:
room_state = await self.get_current_state_at( room_state = await self.get_current_state_at(
room_id=room_id, room_id=room_id,
@@ -1195,97 +1158,28 @@ class SlidingSyncHandler:
state_filter=state_filter, state_filter=state_filter,
to_token=to_token, to_token=to_token,
) )
# The `room_state` includes the hero membership state if needed.
# We'll later filter this down so we don't need to do so here.
hero_membership_state = room_state
else: else:
assert from_token is not None
assert from_bound is not None assert from_bound is not None
if prev_room_sync_config is not None: if prev_room_sync_config is not None:
# Define `all_required_user_state` as all user state we want, which
# is the explicitly requested members, any needed for lazy
# loading, and users whose membership has changed.
all_required_user_state = explicit_user_state | lazy_load_user_ids
for state_type, state_key in room_state_delta_id_map:
if state_type == EventTypes.Member:
all_required_user_state.add(state_key)
# We need to know what user state we previously sent down the
# connection so we can determine what has changed.
#
# We need to fetch all users whose memberships we may want
# to send down this sync. This includes (and matches
# `all_required_user_state`):
# 1. Explicitly requested user state
# 2. Lazy loaded members, i.e. users who appear in the
# timeline.
# 3. The users whose membership has changed in the room, i.e.
# in the state deltas.
#
# This is to correctly handle the cases where a user was
# previously sent down as a lazy loaded member:
# - and is now explicitly requested (so shouldn't be sent down
# again); or
# - their membership has changed (so we need to invalidate
# their entry in the lazy loaded table if we don't send the
# change down).
if all_required_user_state:
previously_returned_user_to_last_seen = (
await self.store.get_sliding_sync_connection_lazy_members(
connection_position=from_token.connection_position,
room_id=room_id,
user_ids=all_required_user_state,
)
)
# Update the room lazy membership changes to track which
# lazy loaded members were needed for this sync. This is so
# that we can correctly track the last time we sent down
# users' membership (and so can evict old membership state
# from the DB tables).
returned_user_id_to_last_seen_ts_map.update(
(user_id, timestamp)
for user_id, timestamp in previously_returned_user_to_last_seen.items()
if user_id in lazy_load_user_ids
)
else:
previously_returned_user_to_last_seen = {}
# Check if there are any changes to the required state config # Check if there are any changes to the required state config
# that we need to handle. # that we need to handle.
changes_return = _required_state_changes( changed_required_state_map, added_state_filter = (
_required_state_changes(
user.to_string(), user.to_string(),
prev_required_state_map=prev_room_sync_config.required_state_map, prev_required_state_map=prev_room_sync_config.required_state_map,
request_required_state_map=room_sync_config.required_state_map, request_required_state_map=expanded_required_state_map,
previously_returned_lazy_user_ids=previously_returned_user_to_last_seen.keys(),
request_lazy_load_user_ids=lazy_load_user_ids,
state_deltas=room_state_delta_id_map, state_deltas=room_state_delta_id_map,
) )
changed_required_state_map = changes_return.changed_required_state_map )
new_connection_state.room_lazy_membership[ if added_state_filter:
room_id
].invalidated_user_ids = changes_return.lazy_members_invalidated
# Add any previously returned explicit memberships to the lazy
# loaded table. This happens when a client requested explicit
# members and then converted them to lazy loading.
for user_id in changes_return.extra_users_to_add_to_lazy_cache:
# We don't know the right timestamp to use here, as we don't
# know the last time we would have sent the membership down.
# So we don't overwrite it if we have a timestamp already,
# and fall back to `None` (which means now) if we don't.
returned_user_id_to_last_seen_ts_map.setdefault(user_id, None)
if changes_return.added_state_filter:
# Some state entries got added, so we pull out the current # Some state entries got added, so we pull out the current
# state for them. If we don't do this we'd only send down new deltas. # state for them. If we don't do this we'd only send down new deltas.
state_ids = await self.get_current_state_ids_at( state_ids = await self.get_current_state_ids_at(
room_id=room_id, room_id=room_id,
room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
state_filter=changes_return.added_state_filter, state_filter=added_state_filter,
to_token=to_token, to_token=to_token,
) )
room_state_delta_id_map.update(state_ids) room_state_delta_id_map.update(state_ids)
@@ -1297,7 +1191,6 @@ class SlidingSyncHandler:
# If the membership changed and we have to get heroes, get the remaining # If the membership changed and we have to get heroes, get the remaining
# heroes from the state # heroes from the state
hero_membership_state = {}
if hero_user_ids: if hero_user_ids:
hero_membership_state = await self.get_current_state_at( hero_membership_state = await self.get_current_state_at(
room_id=room_id, room_id=room_id,
@@ -1305,6 +1198,7 @@ class SlidingSyncHandler:
state_filter=StateFilter.from_types(hero_room_state), state_filter=StateFilter.from_types(hero_room_state),
to_token=to_token, to_token=to_token,
) )
room_state.update(hero_membership_state)
required_room_state: StateMap[EventBase] = {} required_room_state: StateMap[EventBase] = {}
if required_state_filter != StateFilter.none(): if required_state_filter != StateFilter.none():
@@ -1327,7 +1221,7 @@ class SlidingSyncHandler:
# Assemble heroes: extract the info from the state we just fetched # Assemble heroes: extract the info from the state we just fetched
heroes: list[SlidingSyncResult.RoomResult.StrippedHero] = [] heroes: list[SlidingSyncResult.RoomResult.StrippedHero] = []
for hero_user_id in hero_user_ids: for hero_user_id in hero_user_ids:
member_event = hero_membership_state.get((EventTypes.Member, hero_user_id)) member_event = room_state.get((EventTypes.Member, hero_user_id))
if member_event is not None: if member_event is not None:
heroes.append( heroes.append(
SlidingSyncResult.RoomResult.StrippedHero( SlidingSyncResult.RoomResult.StrippedHero(
@@ -1389,7 +1283,7 @@ class SlidingSyncHandler:
bump_stamp = 0 bump_stamp = 0
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = ( room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
room_sync_config.required_state_map expanded_required_state_map
) )
if changed_required_state_map: if changed_required_state_map:
room_sync_required_state_map_to_persist = changed_required_state_map room_sync_required_state_map_to_persist = changed_required_state_map
@@ -1579,37 +1473,13 @@ class SlidingSyncHandler:
return None return None
@attr.s(auto_attribs=True)
class _RequiredStateChangesReturn:
"""Return type for _required_state_changes."""
changed_required_state_map: Mapping[str, AbstractSet[str]] | None
"""The updated required state map to store in the room config, or None if
there is no change."""
added_state_filter: StateFilter
"""The state filter to use to fetch any additional current state that needs
to be returned to the client."""
extra_users_to_add_to_lazy_cache: AbstractSet[str] = frozenset()
"""The set of user IDs we should add to the lazy members cache that we had
previously returned. Handles the case where a user was previously sent down
explicitly but is now being lazy loaded."""
lazy_members_invalidated: AbstractSet[str] = frozenset()
"""The set of user IDs whose membership has changed but we didn't send down,
so we need to invalidate them from the cache."""
def _required_state_changes( def _required_state_changes(
user_id: str, user_id: str,
*, *,
prev_required_state_map: Mapping[str, AbstractSet[str]], prev_required_state_map: Mapping[str, AbstractSet[str]],
request_required_state_map: Mapping[str, AbstractSet[str]], request_required_state_map: Mapping[str, AbstractSet[str]],
previously_returned_lazy_user_ids: AbstractSet[str],
request_lazy_load_user_ids: AbstractSet[str],
state_deltas: StateMap[str], state_deltas: StateMap[str],
) -> _RequiredStateChangesReturn: ) -> tuple[Mapping[str, AbstractSet[str]] | None, StateFilter]:
"""Calculates the changes between the required state room config from the """Calculates the changes between the required state room config from the
previous requests compared with the current request. previous requests compared with the current request.
@@ -1623,62 +1493,14 @@ def _required_state_changes(
added, removed and then added again to the required state. In that case we added, removed and then added again to the required state. In that case we
only want to re-send that entry down sync if it has changed. only want to re-send that entry down sync if it has changed.
Args: Returns:
user_id: The user ID of the user making the request. A 2-tuple of updated required state config (or None if there is no update)
prev_required_state_map: The required state map from the previous and the state filter to use to fetch extra current state that we need to
request. return.
request_required_state_map: The required state map from the current
request.
previously_returned_lazy_user_ids: The set of user IDs whose membership
we have previously returned to the client due to lazy loading. This
is filtered to only include users who have either sent events in the
`timeline`, `required_state` or whose membership changed.
request_lazy_load_user_ids: The set of user IDs whose lazy-loaded
membership is required for this request.
state_deltas: The state deltas in the room in the request token range,
considering user membership. See `get_current_state_deltas_for_room`
for more details.
""" """
# First we find any lazy members that have been invalidated due to state
# changes that we are not sending down.
lazy_members_invalidated = set()
for event_type, state_key in state_deltas:
if event_type != EventTypes.Member:
continue
if state_key in request_lazy_load_user_ids:
# Because it's part of the `request_lazy_load_user_ids`, we're going to
# send this member change down.
continue
if state_key not in previously_returned_lazy_user_ids:
# We've not previously returned this member so nothing to
# invalidate.
continue
lazy_members_invalidated.add(state_key)
if prev_required_state_map == request_required_state_map: if prev_required_state_map == request_required_state_map:
# There has been no change in state, just need to check lazy members. # There has been no change. Return immediately.
newly_returned_lazy_members = ( return None, StateFilter.none()
request_lazy_load_user_ids - previously_returned_lazy_user_ids
)
if newly_returned_lazy_members:
# There are some new lazy members we need to fetch.
added_types: list[tuple[str, str | None]] = []
for new_user_id in newly_returned_lazy_members:
added_types.append((EventTypes.Member, new_user_id))
added_state_filter = StateFilter.from_types(added_types)
else:
added_state_filter = StateFilter.none()
return _RequiredStateChangesReturn(
changed_required_state_map=None,
added_state_filter=added_state_filter,
lazy_members_invalidated=lazy_members_invalidated,
)
prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set()) prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set()) request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
@@ -1688,29 +1510,17 @@ def _required_state_changes(
# already fetching everything, we don't have to fetch anything now that they've # already fetching everything, we don't have to fetch anything now that they've
# narrowed. # narrowed.
if StateValues.WILDCARD in prev_wildcard: if StateValues.WILDCARD in prev_wildcard:
return _RequiredStateChangesReturn( return request_required_state_map, StateFilter.none()
changed_required_state_map=request_required_state_map,
added_state_filter=StateFilter.none(),
lazy_members_invalidated=lazy_members_invalidated,
)
# If an event type wildcard has been added or removed we don't try and do # If an event type wildcard has been added or removed we don't try and do
# anything fancy, and instead always update the effective room required # anything fancy, and instead always update the effective room required
# state config to match the request. # state config to match the request.
if request_wildcard - prev_wildcard: if request_wildcard - prev_wildcard:
# Some keys were added, so we need to fetch everything # Some keys were added, so we need to fetch everything
return _RequiredStateChangesReturn( return request_required_state_map, StateFilter.all()
changed_required_state_map=request_required_state_map,
added_state_filter=StateFilter.all(),
lazy_members_invalidated=lazy_members_invalidated,
)
if prev_wildcard - request_wildcard: if prev_wildcard - request_wildcard:
# Keys were only removed, so we don't have to fetch everything. # Keys were only removed, so we don't have to fetch everything.
return _RequiredStateChangesReturn( return request_required_state_map, StateFilter.none()
changed_required_state_map=request_required_state_map,
added_state_filter=StateFilter.none(),
lazy_members_invalidated=lazy_members_invalidated,
)
# Contains updates to the required state map compared with the previous room # Contains updates to the required state map compared with the previous room
# config. This has the same format as `RoomSyncConfig.required_state` # config. This has the same format as `RoomSyncConfig.required_state`
@@ -1742,17 +1552,6 @@ def _required_state_changes(
# Nothing *added*, so we skip. Removals happen below. # Nothing *added*, so we skip. Removals happen below.
continue continue
# Handle the special case of adding `$LAZY` membership, where we want to
# always record the change to be lazy loading, as we immediately start
# using the lazy loading tables so there is no point *not* recording the
# change to lazy load in the effective room config.
if event_type == EventTypes.Member:
old_state_key_lazy = StateValues.LAZY in old_state_keys
request_state_key_lazy = StateValues.LAZY in request_state_keys
if not old_state_key_lazy and request_state_key_lazy:
changes[event_type] = request_state_keys
continue
# We only remove state keys from the effective state if they've been # We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures # removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send # that if a client removes and then re-adds a state key, we only send
@@ -1823,31 +1622,9 @@ def _required_state_changes(
# LAZY values should also be ignored for event types that are # LAZY values should also be ignored for event types that are
# not membership. # not membership.
pass pass
elif event_type == EventTypes.Member:
if state_key not in previously_returned_lazy_user_ids:
# Only add *explicit* members we haven't previously sent
# down.
added.append((event_type, state_key))
else: else:
added.append((event_type, state_key)) added.append((event_type, state_key))
previously_required_state_members = set(
prev_required_state_map.get(EventTypes.Member, ())
)
if StateValues.ME in previously_required_state_members:
previously_required_state_members.add(user_id)
# We also need to pull out any lazy members that are now required but
# haven't previously been returned.
for required_user_id in (
request_lazy_load_user_ids
# Remove previously returned users
- previously_returned_lazy_user_ids
# Exclude previously explicitly requested members.
- previously_required_state_members
):
added.append((EventTypes.Member, required_user_id))
added_state_filter = StateFilter.from_types(added) added_state_filter = StateFilter.from_types(added)
# Figure out what changes we need to apply to the effective required state # Figure out what changes we need to apply to the effective required state
@@ -1888,25 +1665,13 @@ def _required_state_changes(
changes[event_type] = request_state_keys changes[event_type] = request_state_keys
continue continue
# When handling $LAZY membership, we want to either a) not update the
# state or b) update it to match the request. This is to avoid churn of
# the effective required state for rooms (we deduplicate required state
# between rooms), and because we can store the previously returned
# explicit memberships with the lazy loaded memberships.
if event_type == EventTypes.Member: if event_type == EventTypes.Member:
old_state_key_lazy = StateValues.LAZY in old_state_keys old_state_key_lazy = StateValues.LAZY in old_state_keys
request_state_key_lazy = StateValues.LAZY in request_state_keys request_state_key_lazy = StateValues.LAZY in request_state_keys
has_lazy = old_state_key_lazy or request_state_key_lazy
# If a "$LAZY" has been added or removed we always update to match
# the request.
if old_state_key_lazy != request_state_key_lazy: if old_state_key_lazy != request_state_key_lazy:
changes[event_type] = request_state_keys # If a "$LAZY" has been added or removed we always update the effective room
continue # required state config to match the request.
# Or if we have lazy membership and there are invalidated
# explicit memberships.
if has_lazy and invalidated_state_keys:
changes[event_type] = request_state_keys changes[event_type] = request_state_keys
continue continue
@@ -1921,28 +1686,6 @@ def _required_state_changes(
if invalidated_state_keys: if invalidated_state_keys:
changes[event_type] = old_state_keys - invalidated_state_keys changes[event_type] = old_state_keys - invalidated_state_keys
# Check for any explicit membership changes that were removed that we can
# add to the lazy members previously returned. This is so that we don't
# return a user due to lazy loading if they were previously returned as an
# explicit membership.
users_to_add_to_lazy_cache: set[str] = set()
membership_changes = changes.get(EventTypes.Member, set())
if membership_changes and StateValues.LAZY in request_state_keys:
for state_key in prev_required_state_map.get(EventTypes.Member, set()):
if state_key == StateValues.WILDCARD or state_key == StateValues.LAZY:
# Ignore non-user IDs.
continue
if state_key == StateValues.ME:
# Normalize to proper user ID
state_key = user_id
# We remember the user if they haven't been invalidated
if (EventTypes.Member, state_key) not in state_deltas:
users_to_add_to_lazy_cache.add(state_key)
new_required_state_map = None
if changes: if changes:
# Update the required state config based on the changes. # Update the required state config based on the changes.
new_required_state_map = dict(prev_required_state_map) new_required_state_map = dict(prev_required_state_map)
@@ -1953,9 +1696,6 @@ def _required_state_changes(
# Remove entries with empty state keys. # Remove entries with empty state keys.
new_required_state_map.pop(event_type, None) new_required_state_map.pop(event_type, None)
return _RequiredStateChangesReturn( return new_required_state_map, added_state_filter
changed_required_state_map=new_required_state_map, else:
added_state_filter=added_state_filter, return None, added_state_filter
lazy_members_invalidated=lazy_members_invalidated,
extra_users_to_add_to_lazy_cache=users_to_add_to_lazy_cache,
)
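One side of the sliding-sync hunks above builds an expanded_required_state_map by shallow-copying the requested map and swapping in fresh state-key sets before mutating them, so the caller's original config is never modified. A small standalone illustration of that copy discipline (the data here is made up):

    required_state_map = {"m.room.member": {"$LAZY"}, "m.room.name": {""}}

    # Shallow copy: cheap, but the value sets are still shared with the original.
    expanded = dict(required_state_map)

    # Replace the shared set with a new one before adding entries, so the
    # original request map is left untouched.
    expanded["m.room.member"] = expanded.get("m.room.member", set()) | {"@alice:example.org"}

    assert "@alice:example.org" not in required_state_map["m.room.member"]
    assert "@alice:example.org" in expanded["m.room.member"]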

View File

@@ -34,12 +34,10 @@ from synapse.api.constants import (
EventTypes, EventTypes,
Membership, Membership,
) )
from synapse.api.errors import SlidingSyncUnknownPosition
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import StrippedStateEvent from synapse.events import StrippedStateEvent
from synapse.events.utils import parse_stripped_state_event from synapse.events.utils import parse_stripped_state_event
from synapse.logging.opentracing import start_active_span, trace from synapse.logging.opentracing import start_active_span, trace
from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS
from synapse.storage.databases.main.state import ( from synapse.storage.databases.main.state import (
ROOM_UNKNOWN_SENTINEL, ROOM_UNKNOWN_SENTINEL,
Sentinel as StateSentinel, Sentinel as StateSentinel,
@@ -70,7 +68,6 @@ from synapse.types.handlers.sliding_sync import (
) )
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util import MutableOverlayMapping from synapse.util import MutableOverlayMapping
from synapse.util.duration import Duration
from synapse.util.sentinel import Sentinel from synapse.util.sentinel import Sentinel
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -80,27 +77,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Minimum time in milliseconds since the last sync before we consider expiring
# the connection due to too many rooms to send. This stops us from getting into
# tight loops with clients that request lots of data at once.
#
# c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrarily picked.
MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1)
# How many rooms with updates we allow before we consider the connection expired
# due to too many rooms to send.
#
# c.f. `MINIMUM_NOT_USED_AGE_EXPIRY_MS`. These values are somewhat arbitrarily
# picked.
NUM_ROOMS_THRESHOLD = 100
# Sanity check that our minimum age is sensible compared to the update interval,
# i.e. if `MINIMUM_NOT_USED_AGE_EXPIRY_MS` is too small then we might expire the
# connection even if it is actively being used (and we're just not updating the
# DB frequently enough). We arbitrarily double the update interval to give some
# wiggle room.
assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY
# Helper definition for the types that we might return. We do this to avoid # Helper definition for the types that we might return. We do this to avoid
# copying data between types (which can be expensive for many rooms). # copying data between types (which can be expensive for many rooms).
RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync
@@ -200,7 +176,6 @@ class SlidingSyncRoomLists:
self.storage_controllers = hs.get_storage_controllers() self.storage_controllers = hs.get_storage_controllers()
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
self.is_mine_id = hs.is_mine_id self.is_mine_id = hs.is_mine_id
self._clock = hs.get_clock()
async def compute_interested_rooms( async def compute_interested_rooms(
self, self,
@@ -882,41 +857,11 @@ class SlidingSyncRoomLists:
# We only need to check for new events since any state changes # We only need to check for new events since any state changes
# will also come down as new events. # will also come down as new events.
rooms_that_have_updates = (
rooms_that_have_updates = await ( self.store.get_rooms_that_might_have_updates(
self.store.get_rooms_that_have_updates_since_sliding_sync_table(
relevant_room_map.keys(), from_token.room_key relevant_room_map.keys(), from_token.room_key
) )
) )
# Check if we have lots of updates to send, if so then it's
# better for us to tell the client to do a full resync
# instead (to try and avoid long SSS response times when
# there is new data).
#
# Due to the construction of the SSS API, the client is in
# charge of setting the range of rooms to request updates
# for. Generally, it will start with a small range and then
# expand (and occasionally it may contract the range again
# if it's been offline for a while). If we know there are a
# lot of updates, it's better to reset the connection and
# wait for the client to start again (with a much smaller
# range) than to try and send down a large number of updates
# (which can take a long time).
#
# We only do this if the last sync was over
# `MINIMUM_NOT_USED_AGE_EXPIRY_MS` to ensure we don't get
# into tight loops with clients that keep requesting large
# sliding sync windows.
if len(rooms_that_have_updates) > NUM_ROOMS_THRESHOLD:
last_sync_ts = previous_connection_state.last_used_ts
if (
last_sync_ts is not None
and (self._clock.time_msec() - last_sync_ts)
> MINIMUM_NOT_USED_AGE_EXPIRY.as_millis()
):
raise SlidingSyncUnknownPosition()
rooms_should_send.update(rooms_that_have_updates) rooms_should_send.update(rooms_that_have_updates)
relevant_rooms_to_send_map = { relevant_rooms_to_send_map = {
room_id: room_sync_config room_id: room_sync_config

View File

@@ -13,6 +13,7 @@
# #
import logging import logging
from typing import TYPE_CHECKING
import attr import attr
@@ -24,7 +25,9 @@ from synapse.types.handlers.sliding_sync import (
PerConnectionState, PerConnectionState,
SlidingSyncConfig, SlidingSyncConfig,
) )
from synapse.util.clock import Clock
if TYPE_CHECKING:
pass
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -58,8 +61,7 @@ class SlidingSyncConnectionStore:
to mapping of room ID to `HaveSentRoom`. to mapping of room ID to `HaveSentRoom`.
""" """
clock: Clock store: "DataStore"
store: DataStore
async def get_and_clear_connection_positions( async def get_and_clear_connection_positions(
self, self,
@@ -73,7 +75,7 @@ class SlidingSyncConnectionStore:
""" """
# If this is our first request, there is no previous connection state to fetch out of the database # If this is our first request, there is no previous connection state to fetch out of the database
if from_token is None or from_token.connection_position == 0: if from_token is None or from_token.connection_position == 0:
return PerConnectionState(last_used_ts=None) return PerConnectionState()
conn_id = sync_config.conn_id or "" conn_id = sync_config.conn_id or ""
@@ -99,7 +101,7 @@ class SlidingSyncConnectionStore:
If there are no changes to the state this may return the same token as If there are no changes to the state this may return the same token as
the existing per-connection state. the existing per-connection state.
""" """
if not new_connection_state.has_updates(self.clock): if not new_connection_state.has_updates():
if from_token is not None: if from_token is not None:
return from_token.connection_position return from_token.connection_position
else: else:

Some files were not shown because too many files have changed in this diff