Compare commits


4 Commits

Author         SHA1        Message                                        Date
Eric Eastwood  7e1a2f5a96  Better label lowercase to avoid confusion      2024-11-01 11:40:04 -05:00
Eric Eastwood  150ce230cb  Harden test                                    2024-11-01 11:35:55 -05:00
Eric Eastwood  842304df57  Add changelog                                  2024-11-01 11:30:34 -05:00
Eric Eastwood  d29cb8abed  Remove usage of internal header encoding API   2024-11-01 11:24:22 -05:00
70 changed files with 413 additions and 1927 deletions


@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.9 right now)
# is Python 3.8 right now)
trial_sqlite_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "all",
}
@@ -53,12 +53,12 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.10", "3.11", "3.12", "3.13")
for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
)
trial_postgres_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -77,7 +77,7 @@ if not IS_PR:
trial_no_extra_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "",
}
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
#
# For each type of test we only run on bullseye on PRs
# For each type of test we only run on focal on PRs
sytest_tests = [
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"reactor": "asyncio",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
"reactor": "asyncio",
},
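As context for the hunks above: this CI script builds the trial job matrix in Python and hands it to the workflow as an output. Below is a minimal sketch of that pattern (oldest supported Python only on PRs, the full fan-out otherwise). It assumes `set_output` appends `key=value` lines to the file named by `GITHUB_OUTPUT`; the version literals are illustrative and differ between the two sides of this compare.

```python
# Minimal sketch of the matrix-generation pattern shown in the hunks above.
import json
import os

IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")

OLDEST_PYTHON = "3.8"  # illustrative; the other side of this compare uses "3.9"
NEWER_PYTHONS = ("3.9", "3.10", "3.11", "3.12", "3.13")

# PRs only test the oldest supported Python; full runs fan out over every version.
trial_sqlite_tests = [
    {"python-version": OLDEST_PYTHON, "database": "sqlite", "extras": "all"}
]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in NEWER_PYTHONS
    )


def set_output(key: str, value: str) -> None:
    # GitHub Actions picks up outputs appended to the file named by GITHUB_OUTPUT;
    # fall back to printing when run outside of CI.
    output_file = os.environ.get("GITHUB_OUTPUT")
    if output_file:
        with open(output_file, "a") as f:
            f.write(f"{key}={value}\n")
    else:
        print(f"{key}={value}")


set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```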


@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `jammy` container; it
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally


@@ -132,9 +132,9 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: bullseye
- sytest-tag: focal
- sytest-tag: bullseye
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis


@@ -91,19 +91,10 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Artifact name
id: artifact-name
# We can't have colons in the upload name of the artifact, so we convert
# e.g. `debian:sid` to `sid`.
env:
DISTRO: ${{ matrix.distro }}
run: |
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
name: debs
path: debs/*
build-wheels:
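The "Artifact name" step in the hunk above relies on the shell parameter expansion `${DISTRO#*:}`, which drops everything up to and including the first colon. The same transformation, written out in Python for clarity (the function name is just for illustration):

```python
def artifact_name(distro: str) -> str:
    """Mimic ${DISTRO#*:}: turn e.g. "debian:sid" into "sid", since colons are
    not allowed in an upload-artifact name."""
    return distro.split(":", 1)[-1]


assert artifact_name("debian:sid") == "sid"
assert artifact_name("ubuntu:jammy") == "jammy"
```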
@@ -111,7 +102,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, macos-12]
os: [ubuntu-20.04, macos-12]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -153,7 +144,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -165,9 +156,9 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
@@ -186,7 +177,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Sdist
path: dist/*.tar.gz
@@ -203,14 +194,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
run: |
mkdir debs
mv debs*/* debs/
tar -cvJf debs.tar.xz debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
@@ -218,7 +204,7 @@ jobs:
with:
files: |
Sdist/*
Wheel*/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
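For context on the "Build a tarball for the debs" step: with uniquely named `debs-*` artifacts, each download lands in its own directory, so that variant first merges them into a single `debs/` folder before compressing. A rough Python equivalent of that shell snippet, for illustration only (directory names assumed from the workflow above):

```python
# Rough equivalent of: mkdir debs; mv debs*/* debs/; tar -cvJf debs.tar.xz debs
import glob
import os
import shutil
import tarfile

os.makedirs("debs", exist_ok=True)

# Merge every per-distro download directory (debs-bookworm, debs-sid, ...)
# into a single debs/ folder.
for path in glob.glob("debs-*/*"):
    shutil.move(path, os.path.join("debs", os.path.basename(path)))

# Compress the merged folder with xz, like `tar -cJf`.
with tarfile.open("debs.tar.xz", "w:xz") as tar:
    tar.add("debs")
```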


@@ -397,7 +397,7 @@ jobs:
needs:
- linting-done
- changes
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
@@ -409,12 +409,12 @@ jobs:
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python3-dev \
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v5
with:
python-version: '3.9'
python-version: '3.8'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.9"]
python-version: ["pypy-3.8"]
extras: ["all"]
steps:
@@ -580,11 +580,11 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.9"
- python-version: "3.8"
postgres-version: "11"
- python-version: "3.13"
postgres-version: "17"
- python-version: "3.11"
postgres-version: "15"
services:
postgres:


@@ -99,11 +99,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:bullseye
image: matrixdotorg/sytest-synapse:focal
volumes:
- ${{ github.workspace }}:/src


@@ -1,68 +1,3 @@
# Synapse 1.119.0 (2024-11-13)
No significant changes since 1.119.0rc2.
### Python 3.8 support dropped
Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.
If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.
# Synapse 1.119.0rc2 (2024-11-11)
Note that due to packaging issues there was no v1.119.0rc1.
### Features
- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374))
- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888))
### Bugfixes
- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809))
- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839))
- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847))
- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850))
- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861))
- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903))
- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915))
### Internal Changes
- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908))
- Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786))
- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890))
- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830))
- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852))
- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884))
- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887))
- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894))
- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905))
- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921))
### Updates to locked dependencies
* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657))
* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901))
* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877))
* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853))
* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898))
* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899))
* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879))
* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874))
* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897))
* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900))
* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857))
* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855))
* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856))
# Synapse 1.118.0 (2024-10-29)
No significant changes since 1.118.0rc1.

Cargo.lock generated

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.92"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13"
checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
[[package]]
name = "arc-swap"
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.214"
version = "1.0.213"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5"
checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.214"
version = "1.0.213"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766"
checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5"
dependencies = [
"proc-macro2",
"quote",


@@ -0,0 +1 @@
Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API.

changelog.d/17786.misc Normal file

@@ -0,0 +1 @@
Add a test for downloading and thumbnailing a CMYK JPEG.

changelog.d/17813.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17814.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17815.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17816.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17817.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17818.bugfix Normal file

@@ -0,0 +1 @@
Avoid lost data on some database query retries.

changelog.d/17830.misc Normal file

@@ -0,0 +1 @@
Include the destination in the error of 'Destination mismatch' on federation requests.

changelog.d/17839.bugfix Normal file

@@ -0,0 +1 @@
Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi.

changelog.d/17847.bugfix Normal file

@@ -0,0 +1,2 @@
Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`.

1
changelog.d/17861.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix detection when the built Rust library was outdated when using source installations.

1
changelog.d/17884.misc Normal file
View File

@@ -0,0 +1 @@
Minor speed-up of sliding sync by computing extensions results in parallel.

1
changelog.d/17894.misc Normal file
View File

@@ -0,0 +1 @@
Remove usage of internal header encoding API.

debian/changelog vendored

@@ -1,21 +1,3 @@
matrix-synapse-py3 (1.119.0) stable; urgency=medium
* New Synapse release 1.119.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000
matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
* New Synapse release 1.119.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000
matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
* New Synapse release 1.119.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700
matrix-synapse-py3 (1.118.0) stable; urgency=medium
* New Synapse release 1.118.0.


@@ -20,7 +20,7 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
ARG PYTHON_VERSION=3.12
ARG PYTHON_VERSION=3.11
###
### Stage 0: generate requirements.txt


@@ -5,7 +5,6 @@ basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).


@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)


@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.9 or later, up to Python 3.13.
- Python 3.8 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are


@@ -117,17 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.119.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).
If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.
# Upgrading to v1.111.0
## New worker endpoints for authenticated client and federation media

flake.lock generated

@@ -186,16 +186,16 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1729265718,
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}


@@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
# Nix can be installed on any Linux distribiution or MacOS; NixOS is not required.
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
@@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
# availabile to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
@@ -39,9 +39,9 @@
{
inputs = {
# Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
@@ -50,7 +50,7 @@
rust-overlay.url = "github:oxalica/rust-overlay";
};
outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
@@ -126,7 +126,7 @@
# Automatically activate the poetry virtualenv upon entering the shell.
languages.python.poetry.activate.enable = true;
# Install all extra Python dependencies; this is needed to run the unit
# tests and utilise all Synapse features.
# tests and utilitise all Synapse features.
languages.python.poetry.install.arguments = ["--extras all"];
# Install the 'matrix-synapse' package from the local checkout.
languages.python.poetry.install.installRootPackage = true;
@@ -163,8 +163,8 @@
# Create a postgres user called 'synapse_user' which has ownership
# over the 'synapse' database.
services.postgres.initialScript = ''
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
'';
# Redis is needed in order to run Synapse in worker mode.


@@ -26,7 +26,7 @@ strict_equality = True
# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.9
python_version = 3.8
files =
docker/,

poetry.lock generated

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -11,6 +11,9 @@ files = [
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
[[package]]
name = "attrs"
version = "24.2.0"
@@ -871,7 +874,9 @@ files = [
[package.dependencies]
attrs = ">=22.2.0"
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
jsonschema-specifications = ">=2023.03.6"
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
@@ -891,6 +896,7 @@ files = [
]
[package.dependencies]
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
referencing = ">=0.28.0"
[[package]]
@@ -906,6 +912,7 @@ files = [
[package.dependencies]
importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
"jaraco.classes" = "*"
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
@@ -1373,17 +1380,17 @@ files = [
[[package]]
name = "mypy-zope"
version = "1.0.8"
version = "1.0.7"
description = "Plugin for mypy to support zope interfaces"
optional = false
python-versions = "*"
files = [
{file = "mypy_zope-1.0.8-py3-none-any.whl", hash = "sha256:8794a77dae0c7e2f28b8ac48569091310b3ee45bb9d6cd4797dcb837c40f9976"},
{file = "mypy_zope-1.0.8.tar.gz", hash = "sha256:854303a95aefc4289e8a0796808e002c2c7ecde0a10a8f7b8f48092f94ef9b9f"},
{file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
{file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
]
[package.dependencies]
mypy = ">=1.0.0,<1.13.0"
mypy = ">=1.0.0,<1.12.0"
"zope.interface" = "*"
"zope.schema" = "*"
@@ -1444,13 +1451,13 @@ dev = ["jinja2"]
[[package]]
name = "phonenumbers"
version = "8.13.49"
version = "8.13.48"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
{file = "phonenumbers-8.13.49-py2.py3-none-any.whl", hash = "sha256:e17140955ab3d8f9580727372ea64c5ada5327932d6021ef6fd203c3db8c8139"},
{file = "phonenumbers-8.13.49.tar.gz", hash = "sha256:e608ccb61f0bd42e6db1d2c421f7c22186b88f494870bf40aa31d1a2718ab0ae"},
{file = "phonenumbers-8.13.48-py2.py3-none-any.whl", hash = "sha256:5c51939acefa390eb74119750afb10a85d3c628dc83fd62c52d6f532fcf5d205"},
{file = "phonenumbers-8.13.48.tar.gz", hash = "sha256:62d8df9b0f3c3c41571c6b396f044ddd999d61631534001b8be7fdf7ba1b18f3"},
]
[[package]]
@@ -1564,6 +1571,17 @@ files = [
[package.extras]
testing = ["pytest", "pytest-cov"]
[[package]]
name = "pkgutil-resolve-name"
version = "1.3.10"
description = "Resolve a name to an object."
optional = false
python-versions = ">=3.6"
files = [
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
]
[[package]]
name = "prometheus-client"
version = "0.21.0"
@@ -1930,6 +1948,7 @@ files = [
[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyopenssl = "*"
python-dateutil = "*"
pytz = "*"
@@ -2145,6 +2164,7 @@ files = [
[package.dependencies]
markdown-it-py = ">=2.2.0,<3.0.0"
pygments = ">=2.13.0,<3.0.0"
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]
@@ -2257,29 +2277,29 @@ files = [
[[package]]
name = "ruff"
version = "0.7.2"
version = "0.7.1"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.7.2-py3-none-linux_armv6l.whl", hash = "sha256:b73f873b5f52092e63ed540adefc3c36f1f803790ecf2590e1df8bf0a9f72cb8"},
{file = "ruff-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5b813ef26db1015953daf476202585512afd6a6862a02cde63f3bafb53d0b2d4"},
{file = "ruff-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:853277dbd9675810c6826dad7a428d52a11760744508340e66bf46f8be9701d9"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21aae53ab1490a52bf4e3bf520c10ce120987b047c494cacf4edad0ba0888da2"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc7e0fc6e0cb3168443eeadb6445285abaae75142ee22b2b72c27d790ab60ba"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd77877a4e43b3a98e5ef4715ba3862105e299af0c48942cc6d51ba3d97dc859"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e00163fb897d35523c70d71a46fbaa43bf7bf9af0f4534c53ea5b96b2e03397b"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3c54b538633482dc342e9b634d91168fe8cc56b30a4b4f99287f4e339103e88"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b792468e9804a204be221b14257566669d1db5c00d6bb335996e5cd7004ba80"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dba53ed84ac19ae4bfb4ea4bf0172550a2285fa27fbb13e3746f04c80f7fa088"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b19fafe261bf741bca2764c14cbb4ee1819b67adb63ebc2db6401dcd652e3748"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:28bd8220f4d8f79d590db9e2f6a0674f75ddbc3847277dd44ac1f8d30684b828"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9fd67094e77efbea932e62b5d2483006154794040abb3a5072e659096415ae1e"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:576305393998b7bd6c46018f8104ea3a9cb3fa7908c21d8580e3274a3b04b691"},
{file = "ruff-0.7.2-py3-none-win32.whl", hash = "sha256:fa993cfc9f0ff11187e82de874dfc3611df80852540331bc85c75809c93253a8"},
{file = "ruff-0.7.2-py3-none-win_amd64.whl", hash = "sha256:dd8800cbe0254e06b8fec585e97554047fb82c894973f7ff18558eee33d1cb88"},
{file = "ruff-0.7.2-py3-none-win_arm64.whl", hash = "sha256:bb8368cd45bba3f57bb29cbb8d64b4a33f8415d0149d2655c5c8539452ce7760"},
{file = "ruff-0.7.2.tar.gz", hash = "sha256:2b14e77293380e475b4e3a7a368e14549288ed2931fce259a6f99978669e844f"},
{file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"},
{file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"},
{file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"},
{file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"},
{file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"},
{file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"},
{file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"},
{file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"},
{file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"},
]
[[package]]
@@ -3101,5 +3121,5 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9.0"
content-hash = "0cd942a5193d01cbcef135a0bebd3fa0f12f7dbc63899d6f1c301e0649e9d902"
python-versions = "^3.8.0"
content-hash = "aa1f6d97809596c23a6d160c0c5804971dad0ba49e34b137bbfb79df038fe6f0"


@@ -36,7 +36,7 @@
[tool.ruff]
line-length = 88
target-version = "py39"
target-version = "py38"
[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.119.0"
version = "1.118.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.9.0"
python = "^3.8.0"
# Mandatory Dependencies
# ----------------------
@@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=5.3"
PyYAML = ">=3.13"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
@@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.5.2", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.7.2"
ruff = "0.7.1"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.8.1"]
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@@ -378,13 +378,13 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6, 3.7 and 3.8: EOLed
# - PyPy 3.7 and 3.8: we only support Python 3.9+
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"


@@ -28,8 +28,9 @@ from typing import Collection, Optional, Sequence, Set
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (rolling distro, no EOL)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
"ubuntu:oracular", # 24.10 (EOL 2025-07)


@@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 9):
print("Synapse requires Python 3.9 or above.")
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.


@@ -450,6 +450,3 @@ class ExperimentalConfig(Config):
# MSC4210: Remove legacy mentions
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
# MSC4222: Adding `state_after` to sync v2
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)


@@ -615,7 +615,7 @@ class E2eKeysHandler:
3. Attempt to fetch fallback keys from the database.
Args:
local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
local_query: An iterable of tuples of (user ID, device ID, algorithm).
always_include_fallback_keys: True to always include fallback keys.
Returns:


@@ -196,9 +196,7 @@ class MessageHandler:
AuthError (403) if the user doesn't have permission to view
members of this room.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_filter = state_filter or StateFilter.all()
user_id = requester.user.to_string()
if at_token:

View File

@@ -12,7 +12,6 @@
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import itertools
import logging
from itertools import chain
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple
@@ -80,15 +79,6 @@ sync_processing_time = Histogram(
["initial"],
)
# Limit the number of state_keys we should remember sending down the connection for each
# (room_id, user_id). We don't want to store and pull out too much data in the database.
#
# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down
# too many redundant member state events (that the client already knows about) for a
# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members
# anyway and it takes a while to cycle through 100 members.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100
class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
@@ -883,14 +873,6 @@ class SlidingSyncHandler:
#
# Calculate the `StateFilter` based on the `required_state` for the room
required_state_filter = StateFilter.none()
# The requested `required_state_map` with the lazy membership expanded and
# `$ME` replaced with the user's ID. This allows us to see what membership we've
# sent down to the client in the next request.
#
# Make a copy so we can modify it. Still need to be careful to make a copy of
# the state key sets if we want to add/remove from them. We could make a deep
# copy but this saves us some work.
expanded_required_state_map = dict(room_sync_config.required_state_map)
if room_membership_for_user_at_to_token.membership not in (
Membership.INVITE,
Membership.KNOCK,
@@ -956,48 +938,21 @@ class SlidingSyncHandler:
):
lazy_load_room_members = True
# Everyone in the timeline is relevant
#
# FIXME: We probably also care about invite, ban, kick, targets, etc
# but the spec only mentions "senders".
timeline_membership: Set[str] = set()
if timeline_events is not None:
for timeline_event in timeline_events:
timeline_membership.add(timeline_event.sender)
# Update the required state filter so we pick up the new
# membership
for user_id in timeline_membership:
required_state_types.append(
(EventTypes.Member, user_id)
)
# Add an explicit entry for each user in the timeline
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| timeline_membership
)
# FIXME: We probably also care about invite, ban, kick, targets, etc
# but the spec only mentions "senders".
elif state_key == StateValues.ME:
num_others += 1
required_state_types.append((state_type, user.to_string()))
# Replace `$ME` with the user's ID so we can deduplicate
# when someone requests the same state with `$ME` or with
# their user ID.
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| {user.to_string()}
)
else:
num_others += 1
required_state_types.append((state_type, state_key))
@@ -1061,8 +1016,8 @@ class SlidingSyncHandler:
changed_required_state_map, added_state_filter = (
_required_state_changes(
user.to_string(),
prev_required_state_map=prev_room_sync_config.required_state_map,
request_required_state_map=expanded_required_state_map,
previous_room_config=prev_room_sync_config,
room_sync_config=room_sync_config,
state_deltas=room_state_delta_id_map,
)
)
@@ -1176,9 +1131,7 @@ class SlidingSyncHandler:
# sensible order again.
bump_stamp = 0
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
expanded_required_state_map
)
room_sync_required_state_map_to_persist = room_sync_config.required_state_map
if changed_required_state_map:
room_sync_required_state_map_to_persist = changed_required_state_map
@@ -1232,10 +1185,7 @@ class SlidingSyncHandler:
)
else:
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_required_state_map_to_persist,
)
new_connection_state.room_configs[room_id] = room_sync_config
set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
@@ -1370,8 +1320,8 @@ class SlidingSyncHandler:
def _required_state_changes(
user_id: str,
*,
prev_required_state_map: Mapping[str, AbstractSet[str]],
request_required_state_map: Mapping[str, AbstractSet[str]],
previous_room_config: "RoomSyncConfig",
room_sync_config: RoomSyncConfig,
state_deltas: StateMap[str],
) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
"""Calculates the changes between the required state room config from the
@@ -1392,6 +1342,10 @@ def _required_state_changes(
and the state filter to use to fetch extra current state that we need to
return.
"""
prev_required_state_map = previous_room_config.required_state_map
request_required_state_map = room_sync_config.required_state_map
if prev_required_state_map == request_required_state_map:
# There has been no change. Return immediately.
return None, StateFilter.none()
@@ -1424,19 +1378,12 @@ def _required_state_changes(
# client. Passed to `StateFilter.from_types(...)`
added: List[Tuple[str, Optional[str]]] = []
# Convert the list of state deltas to map from type to state_keys that have
# changed.
changed_types_to_state_keys: Dict[str, Set[str]] = {}
for event_type, state_key in state_deltas:
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
# First we calculate what, if anything, has been *added*.
for event_type in (
prev_required_state_map.keys() | request_required_state_map.keys()
):
old_state_keys = prev_required_state_map.get(event_type, set())
request_state_keys = request_required_state_map.get(event_type, set())
changed_state_keys = changed_types_to_state_keys.get(event_type, set())
if old_state_keys == request_state_keys:
# No change to this type
@@ -1446,55 +1393,8 @@ def _required_state_changes(
# Nothing *added*, so we skip. Removals happen below.
continue
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# Figure out which state keys we should remember sending down the connection
inheritable_previous_state_keys = (
# Retain the previous state_keys that we've sent down before.
# Wildcard and lazy state keys are not sticky from previous requests.
(old_state_keys - {StateValues.WILDCARD, StateValues.LAZY})
- invalidated_state_keys
)
# Always update changes to include the newly added keys (we've expanded the set
# of state keys), use the new requested set with whatever hasn't been
# invalidated from the previous set.
changes[event_type] = request_state_keys | inheritable_previous_state_keys
# Limit the number of state_keys we should remember sending down the connection
# for each (room_id, user_id). We don't want to store and pull out too much data
# in the database. This is a happy-medium between remembering nothing and
# everything. We can avoid sending redundant state down the connection most of
# the time given that most rooms don't have 100 members anyway and it takes a
# while to cycle through 100 members.
#
# Only remember up to (MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER)
if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Reset back to only the requested state keys
changes[event_type] = request_state_keys
# Skip if there isn't any room to fill in the rest with previous state keys
if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Fill the rest with previous state_keys. Ideally, we could sort
# these by recency but it's just a set so just pick an arbitrary
# subset (good enough).
changes[event_type] = changes[event_type] | set(
itertools.islice(
inheritable_previous_state_keys,
# Just taking the difference isn't perfect as there could be
# overlap in the keys between the requested and previous but we
# will decide to just take the easy route for now and avoid
# additional set operations to figure it out.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
- len(request_state_keys),
)
)
# Always update changes to include the newly added keys
changes[event_type] = request_state_keys
if StateValues.WILDCARD in old_state_keys:
# We were previously fetching everything for this type, so we don't need to
@@ -1521,6 +1421,12 @@ def _required_state_changes(
added_state_filter = StateFilter.from_types(added)
# Convert the list of state deltas to map from type to state_keys that have
# changed.
changed_types_to_state_keys: Dict[str, Set[str]] = {}
for event_type, state_key in state_deltas:
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
# Figure out what changes we need to apply to the effective required state
# config.
for event_type, changed_state_keys in changed_types_to_state_keys.items():
@@ -1531,23 +1437,15 @@ def _required_state_changes(
# No change.
continue
# If we see the `user_id` as a state_key, also add "$ME" to the list of state
# that has changed to account for people requesting `required_state` with `$ME`
# or their user ID.
if user_id in changed_state_keys:
changed_state_keys.add(StateValues.ME)
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# We've expanded the set of state keys, ... (already handled above)
if request_state_keys - old_state_keys:
# We've expanded the set of state keys, so we just clobber the
# current set with the new set.
#
# We could also ensure that we keep entries where the state hasn't
# changed, but are no longer in the requested required state, but
# that's a sufficient edge case that we can ignore (as its only a
# performance optimization).
changes[event_type] = request_state_keys
continue
old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
@@ -1569,6 +1467,11 @@ def _required_state_changes(
changes[event_type] = request_state_keys
continue
# Handle "$ME" values by adding "$ME" if the state key matches the user
# ID.
if user_id in changed_state_keys:
changed_state_keys.add(StateValues.ME)
# At this point there are no wildcards and no additions to the set of
# state keys requested, only deletions.
#
@@ -1577,8 +1480,9 @@ def _required_state_changes(
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if its changed (rather than
# sending down the same event twice).
if invalidated_state_keys:
changes[event_type] = old_state_keys - invalidated_state_keys
invalidated = (old_state_keys - request_state_keys) & changed_state_keys
if invalidated:
changes[event_type] = old_state_keys - invalidated
if changes:
# Update the required state config based on the changes.


@@ -143,7 +143,6 @@ class SyncConfig:
filter_collection: FilterCollection
is_guest: bool
device_id: Optional[str]
use_state_after: bool
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -1142,7 +1141,6 @@ class SyncHandler:
since_token: Optional[StreamToken],
end_token: StreamToken,
full_state: bool,
joined: bool,
) -> MutableStateMap[EventBase]:
"""Works out the difference in state between the end of the previous sync and
the start of the timeline.
@@ -1157,7 +1155,6 @@ class SyncHandler:
the point just after their leave event.
full_state: Whether to force returning the full state.
`lazy_load_members` still applies when `full_state` is `True`.
joined: whether the user is currently joined to the room
Returns:
The state to return in the sync response for the room.
@@ -1233,12 +1230,11 @@ class SyncHandler:
if full_state:
state_ids = await self._compute_state_delta_for_full_sync(
room_id,
sync_config,
sync_config.user,
batch,
end_token,
members_to_fetch,
timeline_state,
joined,
)
else:
# If this is an initial sync then full_state should be set, and
@@ -1248,7 +1244,6 @@ class SyncHandler:
state_ids = await self._compute_state_delta_for_incremental_sync(
room_id,
sync_config,
batch,
since_token,
end_token,
@@ -1321,24 +1316,20 @@ class SyncHandler:
async def _compute_state_delta_for_full_sync(
self,
room_id: str,
sync_config: SyncConfig,
syncing_user: UserID,
batch: TimelineBatch,
end_token: StreamToken,
members_to_fetch: Optional[Set[str]],
timeline_state: StateMap[str],
joined: bool,
) -> StateMap[str]:
"""Calculate the state events to be included in a full sync response.
As with `_compute_state_delta_for_incremental_sync`, the result will include
the membership events for the senders of each event in `members_to_fetch`.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
sync_confg: The user that is calling `/sync`.
syncing_user: The user that is calling `/sync`.
batch: The timeline batch for the room that will be sent to the user.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
@@ -1347,11 +1338,10 @@ class SyncHandler:
events in the timeline.
timeline_state: The contribution to the room state from state events in
`batch`. Only contains the last event for any given state key.
joined: whether the user is currently joined to the room
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` or `state_after` part of the sync response.
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading of membership events is enabled.
@@ -1369,7 +1359,7 @@ class SyncHandler:
# is no guarantee that our membership will be in the auth events of
# timeline events when the room is partial stated.
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch.union((sync_config.user.to_string(),))
members_to_fetch.union((syncing_user.to_string(),))
)
# We are happy to use partial state to compute the `/sync` response.
@@ -1383,61 +1373,6 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check if we are wanting to return the state at the start or end of the
# timeline. If at the end we can just use the current state.
if sync_config.use_state_after:
# If we're getting the state at the end of the timeline, we can just
# use the current state of the room (and roll back any changes
# between when we fetched the current state and `end_token`).
#
# For rooms we're not joined to, there might be a very large number
# of deltas between `end_token` and "now", and so instead we fetch
# the state at the end of the timeline.
if joined:
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
# Now roll back the state by looking at the state deltas between
# end_token and now.
deltas = await self.store.get_current_state_deltas_for_room(
room_id,
from_token=end_token.room_key,
to_token=self.store.get_room_max_token(),
)
if deltas:
mutable_state_ids = dict(state_ids)
# We iterate over the deltas backwards so that if there are
# multiple changes of the same type/state_key we'll
# correctly pick the earliest delta.
for delta in reversed(deltas):
if delta.prev_event_id:
mutable_state_ids[(delta.event_type, delta.state_key)] = (
delta.prev_event_id
)
elif (delta.event_type, delta.state_key) in mutable_state_ids:
mutable_state_ids.pop((delta.event_type, delta.state_key))
state_ids = mutable_state_ids
return state_ids
else:
# Just use state groups to get the state at the end of the
# timeline, i.e. the state at the leave/etc event.
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
return state_at_timeline_end
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
@@ -1470,7 +1405,6 @@ class SyncHandler:
async def _compute_state_delta_for_incremental_sync(
self,
room_id: str,
sync_config: SyncConfig,
batch: TimelineBatch,
since_token: StreamToken,
end_token: StreamToken,
@@ -1485,12 +1419,8 @@ class SyncHandler:
(`compute_state_delta`) is responsible for keeping track of which membership
events we have already sent to the client, and hence ripping them out.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
sync_config
batch: The timeline batch for the room that will be sent to the user.
since_token: Token of the end of the previous batch.
end_token: Token of the end of the current batch. Normally this will be
@@ -1503,7 +1433,7 @@ class SyncHandler:
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` or `state_after` part of the sync response.
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading is enabled. Only return the state that is needed.
@@ -1515,51 +1445,6 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check if we are wanting to return the state at the start or end of the
# timeline. If at the end we can just use the current state delta stream.
if sync_config.use_state_after:
delta_state_ids: MutableStateMap[str] = {}
if members_to_fetch:
# We're lazy-loading, so the client might need some more member
# events to understand the events in this timeline. So we always
# fish out all the member events corresponding to the timeline
# here. The caller will then dedupe any redundant ones.
member_ids = await self._state_storage_controller.get_current_state_ids(
room_id=room_id,
state_filter=StateFilter.from_types(
(EventTypes.Member, member) for member in members_to_fetch
),
await_full_state=await_full_state,
)
delta_state_ids.update(member_ids)
# We don't do LL filtering for incremental syncs - see
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
# N.B. this slows down incr syncs as we are now processing way more
# state in the server than if we were LLing.
#
# i.e. we return all state deltas, including membership changes that
# we'd normally exclude due to LL.
deltas = await self.store.get_current_state_deltas_for_room(
room_id=room_id,
from_token=since_token.room_key,
to_token=end_token.room_key,
)
for delta in deltas:
if delta.event_id is None:
# There was a state reset and this state entry is no longer
# present, but we have no way of informing the client about
# this, so we just skip it for now.
continue
# Note that deltas are in stream ordering, so if there are
# multiple deltas for a given type/state_key we'll always pick
# the latest one.
delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
return delta_state_ids
# For a non-gappy sync if the events in the timeline are simply a linear
# chain (i.e. no merging/branching of the graph), then we know the state
# delta between the end of the previous sync and start of the new one is
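The incremental branch removed above goes the other way: it replays the deltas between `since_token` and `end_token` forwards, so the latest change to each (type, state_key) wins, and state resets (deltas with no event) are silently skipped. A standalone sketch under those assumptions:

from typing import Dict, Iterable, Optional, Tuple

StateKey = Tuple[str, str]  # (event_type, state_key)

def apply_deltas_forwards(
    deltas_in_stream_order: Iterable[Tuple[str, str, Optional[str]]],
) -> Dict[StateKey, str]:
    """Build the incremental state map from (type, state_key, event_id) deltas."""
    delta_state_ids: Dict[StateKey, str] = {}
    for event_type, state_key, event_id in deltas_in_stream_order:
        if event_id is None:
            # A state reset removed this entry; there is no clean way to tell
            # the client, so it is skipped (mirroring the comment above).
            continue
        # Stream order means later deltas simply overwrite earlier ones.
        delta_state_ids[(event_type, state_key)] = event_id
    return delta_state_ids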
@@ -2982,7 +2867,6 @@ class SyncHandler:
since_token,
room_builder.end_token,
full_state=full_state,
joined=room_builder.rtype == "joined",
)
else:
# An out of band room won't have any state changes.

View File

@@ -39,7 +39,7 @@ from twisted.internet.endpoints import (
)
from twisted.internet.interfaces import (
IPushProducer,
IReactorTime,
IReactorTCP,
IStreamClientEndpoint,
)
from twisted.internet.protocol import Factory, Protocol
@@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler):
port: int,
maximum_buffer: int = 1000,
level: int = logging.NOTSET,
_reactor: Optional[IReactorTime] = None,
_reactor: Optional[IReactorTCP] = None,
):
super().__init__(level=level)
self.host = host

View File

@@ -43,15 +43,12 @@ class ExperimentalFeature(str, Enum):
MSC3881 = "msc3881"
MSC3575 = "msc3575"
MSC4222 = "msc4222"
def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
if self is ExperimentalFeature.MSC3881:
return config.experimental.msc3881_enabled
if self is ExperimentalFeature.MSC3575:
return config.experimental.msc3575_enabled
if self is ExperimentalFeature.MSC4222:
return config.experimental.msc4222_enabled
assert_never(self)

View File

@@ -152,14 +152,6 @@ class SyncRestServlet(RestServlet):
filter_id = parse_string(request, "filter")
full_state = parse_boolean(request, "full_state", default=False)
use_state_after = False
if await self.store.is_feature_enabled(
user.to_string(), ExperimentalFeature.MSC4222
):
use_state_after = parse_boolean(
request, "org.matrix.msc4222.use_state_after", default=False
)
logger.debug(
"/sync: user=%r, timeout=%r, since=%r, "
"set_presence=%r, filter_id=%r, device_id=%r",
@@ -192,7 +184,6 @@ class SyncRestServlet(RestServlet):
full_state,
device_id,
last_ignore_accdata_streampos,
use_state_after,
)
if filter_id is None:
@@ -229,7 +220,6 @@ class SyncRestServlet(RestServlet):
filter_collection=filter_collection,
is_guest=requester.is_guest,
device_id=device_id,
use_state_after=use_state_after,
)
since_token = None
@@ -268,7 +258,7 @@ class SyncRestServlet(RestServlet):
# We know that the requester has an access token since appservices
# cannot use sync.
response_content = await self.encode_response(
time_now, sync_config, sync_result, requester, filter_collection
time_now, sync_result, requester, filter_collection
)
logger.debug("Event formatting complete")
@@ -278,7 +268,6 @@ class SyncRestServlet(RestServlet):
async def encode_response(
self,
time_now: int,
sync_config: SyncConfig,
sync_result: SyncResult,
requester: Requester,
filter: FilterCollection,
@@ -303,7 +292,7 @@ class SyncRestServlet(RestServlet):
)
joined = await self.encode_joined(
sync_config, sync_result.joined, time_now, serialize_options
sync_result.joined, time_now, serialize_options
)
invited = await self.encode_invited(
@@ -315,7 +304,7 @@ class SyncRestServlet(RestServlet):
)
archived = await self.encode_archived(
sync_config, sync_result.archived, time_now, serialize_options
sync_result.archived, time_now, serialize_options
)
logger.debug("building sync response dict")
@@ -383,7 +372,6 @@ class SyncRestServlet(RestServlet):
@trace_with_opname("sync.encode_joined")
async def encode_joined(
self,
sync_config: SyncConfig,
rooms: List[JoinedSyncResult],
time_now: int,
serialize_options: SerializeEventConfig,
@@ -392,7 +380,6 @@ class SyncRestServlet(RestServlet):
Encode the joined rooms in a sync result
Args:
sync_config
rooms: list of sync results for rooms this user is joined to
time_now: current time - used as a baseline for age calculations
serialize_options: Event serializer options
@@ -402,11 +389,7 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = await self.encode_room(
sync_config,
room,
time_now,
joined=True,
serialize_options=serialize_options,
room, time_now, joined=True, serialize_options=serialize_options
)
return joined
@@ -494,7 +477,6 @@ class SyncRestServlet(RestServlet):
@trace_with_opname("sync.encode_archived")
async def encode_archived(
self,
sync_config: SyncConfig,
rooms: List[ArchivedSyncResult],
time_now: int,
serialize_options: SerializeEventConfig,
@@ -503,7 +485,6 @@ class SyncRestServlet(RestServlet):
Encode the archived rooms in a sync result
Args:
sync_config
rooms: list of sync results for rooms this user has left
time_now: current time - used as a baseline for age calculations
serialize_options: Event serializer options
@@ -513,18 +494,13 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = await self.encode_room(
sync_config,
room,
time_now,
joined=False,
serialize_options=serialize_options,
room, time_now, joined=False, serialize_options=serialize_options
)
return joined
async def encode_room(
self,
sync_config: SyncConfig,
room: Union[JoinedSyncResult, ArchivedSyncResult],
time_now: int,
joined: bool,
@@ -532,7 +508,6 @@ class SyncRestServlet(RestServlet):
) -> JsonDict:
"""
Args:
sync_config
room: sync result for a single room
time_now: current time - used as a baseline for age calculations
token_id: ID of the user's auth token - used for namespacing
@@ -573,20 +548,13 @@ class SyncRestServlet(RestServlet):
account_data = room.account_data
# We either include a `state` or `state_after` field depending on
# whether the client has opted in to the newer `state_after` behavior.
if sync_config.use_state_after:
state_key_name = "org.matrix.msc4222.state_after"
else:
state_key_name = "state"
result: JsonDict = {
"timeline": {
"events": serialized_timeline,
"prev_batch": await room.timeline.prev_batch.to_string(self.store),
"limited": room.timeline.limited,
},
state_key_name: {"events": serialized_state},
"state": {"events": serialized_state},
"account_data": {"events": account_data},
}
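On the side of this diff that carries the MSC4222 flag, the only difference `use_state_after` makes to the per-room payload is which field name carries the serialized state; the values below are placeholders:

# Client has not opted in to MSC4222: state is returned under "state".
room_payload_default = {
    "timeline": {"events": ["..."], "prev_batch": "<token>", "limited": False},
    "state": {"events": ["..."]},
    "account_data": {"events": []},
}

# Client sent org.matrix.msc4222.use_state_after=true: same shape, but the
# state (now the state *after* the timeline) moves to the unstable field.
room_payload_msc4222 = {
    "timeline": {"events": ["..."], "prev_batch": "<token>", "limited": False},
    "org.matrix.msc4222.state_after": {"events": ["..."]},
    "account_data": {"events": []},
}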
@@ -720,7 +688,6 @@ class SlidingSyncE2eeRestServlet(RestServlet):
filter_collection=self.only_member_events_filter_collection,
is_guest=requester.is_guest,
device_id=device_id,
use_state_after=False, # We don't return any rooms so this flag is a no-op
)
since_token = None

View File

@@ -234,11 +234,8 @@ class StateStorageController:
RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
await_full_state = True
if not state_filter.must_await_full_state(self._is_mine_id):
if state_filter and not state_filter.must_await_full_state(self._is_mine_id):
await_full_state = False
event_to_groups = await self.get_state_group_for_events(
@@ -247,7 +244,7 @@ class StateStorageController:
groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups(
groups, state_filter
groups, state_filter or StateFilter.all()
)
state_event_map = await self.stores.main.get_events(
@@ -295,11 +292,10 @@ class StateStorageController:
RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
if await_full_state and not state_filter.must_await_full_state(
self._is_mine_id
if (
await_full_state
and state_filter
and not state_filter.must_await_full_state(self._is_mine_id)
):
# Full state is not required if the state filter is restrictive enough.
await_full_state = False
@@ -310,7 +306,7 @@ class StateStorageController:
groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups(
groups, state_filter
groups, state_filter or StateFilter.all()
)
event_to_state = {
@@ -339,10 +335,9 @@ class StateStorageController:
RuntimeError if we don't have a state group for the event (ie it is an
outlier or is unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
state_map = await self.get_state_for_events([event_id], state_filter)
state_map = await self.get_state_for_events(
[event_id], state_filter or StateFilter.all()
)
return state_map[event_id]
@trace
@@ -370,12 +365,9 @@ class StateStorageController:
RuntimeError if we don't have a state group for the event (ie it is an
outlier or is unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
state_map = await self.get_state_ids_for_events(
[event_id],
state_filter,
state_filter or StateFilter.all(),
await_full_state=await_full_state,
)
return state_map[event_id]
@@ -396,12 +388,9 @@ class StateStorageController:
at the event and `state_filter` is not satisfied by partial state.
Defaults to `True`.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_ids = await self.get_state_ids_for_event(
event_id,
state_filter=state_filter,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)
@@ -437,9 +426,6 @@ class StateStorageController:
at the last event in the room before `stream_position` and
`state_filter` is not satisfied by partial state. Defaults to `True`.
"""
if state_filter is None:
state_filter = StateFilter.all()
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
@@ -456,7 +442,7 @@ class StateStorageController:
if last_event_id:
state = await self.get_state_after_event(
last_event_id,
state_filter=state_filter,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)
@@ -514,10 +500,9 @@ class StateStorageController:
Returns:
Dict of state group to state map.
"""
if state_filter is None:
state_filter = StateFilter.all()
return await self.stores.state._get_state_for_groups(groups, state_filter)
return await self.stores.state._get_state_for_groups(
groups, state_filter or StateFilter.all()
)
@trace
@tag_args
@@ -598,13 +583,12 @@ class StateStorageController:
Returns:
The current state of the room.
"""
if state_filter is None:
state_filter = StateFilter.all()
if await_full_state and state_filter.must_await_full_state(self._is_mine_id):
if await_full_state and (
not state_filter or state_filter.must_await_full_state(self._is_mine_id)
):
await self._partial_state_room_tracker.await_full_state(room_id)
if state_filter is not None and not state_filter.is_full():
if state_filter and not state_filter.is_full():
return await self.stores.main.get_partial_filtered_current_state_ids(
room_id, state_filter
)
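The recurring change in this file is a switch between an explicit `None` check and the inline `state_filter or StateFilter.all()` fallback. The two spellings only coincide as long as every real filter instance is truthy; a toy sketch of the difference, using made-up classes rather than the real `StateFilter`:

from typing import Optional, Set

class ToyFilter:
    def __init__(self, types: Set[str]) -> None:
        self.types = types

    @staticmethod
    def all() -> "ToyFilter":
        return ToyFilter({"*"})

def resolve_explicit(state_filter: Optional[ToyFilter] = None) -> ToyFilter:
    # Falls back only when no filter was passed at all.
    if state_filter is None:
        state_filter = ToyFilter.all()
    return state_filter

def resolve_or(state_filter: Optional[ToyFilter] = None) -> ToyFilter:
    # Falls back whenever the argument is falsy; fine here because ToyFilter
    # (like most plain classes) is always truthy, but worth keeping in mind.
    return state_filter or ToyFilter.all()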

View File

@@ -99,13 +99,6 @@ class EndToEndKeyBackgroundStore(SQLBaseStore):
unique=True,
)
self.db_pool.updates.register_background_index_update(
update_name="add_otk_ts_added_index",
index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx",
table="e2e_one_time_keys_json",
columns=("user_id", "device_id", "algorithm", "ts_added_ms"),
)
class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore):
def __init__(
@@ -1129,7 +1122,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
"""Take a list of one time keys out of the database.
Args:
query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
query_list: An iterable of tuples of (user ID, device ID, algorithm).
Returns:
A tuple (results, missing) of:
@@ -1317,14 +1310,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
OTK was found.
"""
# Return the oldest keys from this device (based on `ts_added_ms`).
# Doing so means that keys are issued in the same order they were uploaded,
# which reduces the chances of a client expiring its copy of a (private)
# key while the public key is still on the server, waiting to be issued.
sql = """
SELECT key_id, key_json FROM e2e_one_time_keys_json
WHERE user_id = ? AND device_id = ? AND algorithm = ?
ORDER BY ts_added_ms
LIMIT ?
"""
@@ -1366,22 +1354,13 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
A list of tuples (user_id, device_id, algorithm, key_id, key_json)
for each OTK claimed.
"""
# Find, delete, and return the oldest keys from each device (based on
# `ts_added_ms`).
#
# Doing so means that keys are issued in the same order they were uploaded,
# which reduces the chances of a client expiring its copy of a (private)
# key while the public key is still on the server, waiting to be issued.
sql = """
WITH claims(user_id, device_id, algorithm, claim_count) AS (
VALUES ?
), ranked_keys AS (
SELECT
user_id, device_id, algorithm, key_id, claim_count,
ROW_NUMBER() OVER (
PARTITION BY (user_id, device_id, algorithm)
ORDER BY ts_added_ms
) AS r
ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r
FROM e2e_one_time_keys_json
JOIN claims USING (user_id, device_id, algorithm)
)
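The `ORDER BY ts_added_ms` shown on one side of these hunks (both in the plain SELECT and inside the window function) is what guarantees one-time keys are issued oldest-upload-first rather than in whatever order the database happens to return rows. A database-free sketch of the same rule:

from typing import List, Tuple

def claim_otks(uploaded: List[Tuple[str, int]], count: int) -> List[str]:
    """uploaded: (key_id, ts_added_ms) pairs for one device and algorithm."""
    # Sort by upload time, not key id, so a key uploaded first is issued first
    # even if its id sorts later (cf. the handler tests further down this diff).
    oldest_first = sorted(uploaded, key=lambda pair: pair[1])
    return [key_id for key_id, _ts in oldest_first[:count]]

# claim_otks([("alg1:k20", 100), ("alg1:k10", 200)], 1) == ["alg1:k20"]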

View File

@@ -2550,9 +2550,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
still contains events with partial state.
"""
try:
async with (
self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id
):
async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
await self.db_pool.runInteraction(
"clear_partial_state_room",
self._clear_partial_state_room_txn,

View File

@@ -572,10 +572,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
Returns:
Map from type/state_key to event ID.
"""
if state_filter is None:
state_filter = StateFilter.all()
where_clause, where_args = (state_filter).make_sql_filter_clause()
where_clause, where_args = (
state_filter or StateFilter.all()
).make_sql_filter_clause()
if not where_clause:
# We delegate to the cached version
@@ -584,7 +584,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
def _get_filtered_current_state_ids_txn(
txn: LoggingTransaction,
) -> StateMap[str]:
results = StateMapWrapper(state_filter=state_filter)
results = StateMapWrapper(state_filter=state_filter or StateFilter.all())
sql = """
SELECT type, state_key, event_id FROM current_state_events
@@ -681,9 +681,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
context: EventContext,
) -> None:
"""Update the state group for a partial state event"""
async with (
self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id
):
async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id:
await self.db_pool.runInteraction(
"update_state_for_partial_state_event",
self._update_state_for_partial_state_event_txn,

View File

@@ -20,26 +20,18 @@
#
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from typing import List, Optional, Tuple
import attr
from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
from synapse.storage.databases.main.stream import _filter_results_by_stream
from synapse.types import RoomStreamToken, StrCollection
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -62,21 +54,6 @@ class StateDeltasStore(SQLBaseStore):
# attribute. TODO: can we get static analysis to enforce this?
_curr_state_delta_stream_cache: StreamChangeCache
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
update_name="current_state_delta_stream_room_index",
index_name="current_state_delta_stream_room_idx",
table="current_state_delta_stream",
columns=("room_id", "stream_id"),
)
async def get_partial_current_state_deltas(
self, prev_stream_id: int, max_stream_id: int
) -> Tuple[int, List[StateDelta]]:

View File

@@ -112,8 +112,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
Returns:
Map from state_group to a StateMap at that point.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_filter = state_filter or StateFilter.all()
results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}

View File

@@ -284,8 +284,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
Returns:
Dict of state group to state map.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_filter = state_filter or StateFilter.all()
member_filter, non_member_filter = state_filter.get_member_split()

View File

@@ -1,18 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can
-- efficiently be issued in the same order they were uploaded.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8803, 'add_otk_ts_added_index', '{}');

View File

@@ -1,18 +0,0 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add an index on (room_id, stream_id) on current_state_delta_stream, so that state deltas
-- for a given room can be looked up efficiently.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8804, 'current_state_delta_stream_room_index', '{}');
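For context on why each SQL delta file and its matching `register_background_index_update` call earlier in this diff come and go together: the delta only queues a named row, and the store must register a handler under exactly that name for the index to actually be built. A schematic, self-contained sketch (not Synapse's real classes):

from typing import Callable, Dict, List

_handlers: Dict[str, Callable[[], None]] = {}

def register_background_index_update(update_name: str, build: Callable[[], None]) -> None:
    # Stand-in for the real registration calls shown earlier in this diff.
    _handlers[update_name] = build

def run_queued_updates(queued_update_names: List[str]) -> None:
    # `queued_update_names` stands in for the rows the delta SQL inserted
    # into the background_updates table.
    for name in queued_update_names:
        _handlers[name]()

register_background_index_update(
    "current_state_delta_stream_room_index",
    lambda: print(
        "CREATE INDEX current_state_delta_stream_room_idx "
        "ON current_state_delta_stream (room_id, stream_id)"
    ),
)
run_queued_updates(["current_state_delta_stream_room_index"])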

View File

@@ -68,23 +68,15 @@ class StateFilter:
include_others: bool = False
def __attrs_post_init__(self) -> None:
# If `include_others` is set we canonicalise the filter by removing
# wildcards from the types dictionary
if self.include_others:
# If `include_others` is set we canonicalise the filter by removing
# wildcards from the types dictionary
# this is needed to work around the fact that StateFilter is frozen
object.__setattr__(
self,
"types",
immutabledict({k: v for k, v in self.types.items() if v is not None}),
)
else:
# Otherwise we remove entries where the value is the empty set.
object.__setattr__(
self,
"types",
immutabledict({k: v for k, v in self.types.items() if v is None or v}),
)
@staticmethod
def all() -> "StateFilter":
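The canonicalisation above is easier to see with plain dictionaries (a rough sketch, not the real `StateFilter`): when `include_others` is set, a type mapped to `None` ("every state_key of this type") adds nothing and is dropped; in the other branch shown in the hunk, types mapped to an empty set match nothing and are dropped instead.

from typing import Dict, Optional, Set

def canonicalise(
    types: Dict[str, Optional[Set[str]]], include_others: bool
) -> Dict[str, Optional[Set[str]]]:
    if include_others:
        # Unlisted types are included anyway, so wildcard entries are redundant.
        return {k: v for k, v in types.items() if v is not None}
    # Entries with an empty set of state_keys can never match anything.
    return {k: v for k, v in types.items() if v is None or v}

# canonicalise({"m.room.member": None, "m.room.name": {""}}, include_others=True)
# == {"m.room.name": {""}}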

View File

@@ -47,6 +47,7 @@ class WheelTimer(Generic[T]):
"""
self.bucket_size: int = bucket_size
self.entries: List[_Entry[T]] = []
self.current_tick: int = 0
def insert(self, now: int, obj: T, then: int) -> None:
"""Inserts object into timer.
@@ -77,10 +78,11 @@ class WheelTimer(Generic[T]):
self.entries[max(min_key, then_key) - min_key].elements.add(obj)
return
next_key = now_key + 1
if self.entries:
last_key = self.entries[-1].end_key + 1
last_key = self.entries[-1].end_key
else:
last_key = now_key + 1
last_key = next_key
# Handle the case when `then` is in the past and `entries` is empty.
then_key = max(last_key, then_key)
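The hunk above adjusts how the wheel timer picks a bucket for a new entry when its existing buckets have fallen behind or are empty. The underlying idea, stripped to its bones (a much-simplified sketch, not the real `WheelTimer`):

from typing import Dict, Generic, List, Set, TypeVar

T = TypeVar("T")

class TinyWheelTimer(Generic[T]):
    def __init__(self, bucket_size: int = 5000) -> None:
        self.bucket_size = bucket_size
        self.buckets: Dict[int, Set[T]] = {}

    def insert(self, now: int, obj: T, then: int) -> None:
        # Bucket key is just the timestamp divided into fixed-size slots; never
        # schedule into the past, so clamp `then` to at least `now`.
        key = max(now, then) // self.bucket_size
        self.buckets.setdefault(key, set()).add(obj)

    def fetch(self, now: int) -> List[T]:
        # Pop every bucket that is now entirely in the past.
        now_key = now // self.bucket_size
        due = sorted(k for k in self.buckets if k < now_key)
        out: List[T] = []
        for k in due:
            out.extend(self.buckets.pop(k))
        return out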

View File

@@ -151,30 +151,18 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
def test_claim_one_time_key(self) -> None:
local_user = "@boris:" + self.hs.hostname
device_id = "xyz"
keys = {"alg1:k1": "key1"}
res = self.get_success(
self.handler.upload_keys_for_user(
local_user, device_id, {"one_time_keys": {"alg1:k1": "key1"}}
local_user, device_id, {"one_time_keys": keys}
)
)
self.assertDictEqual(
res, {"one_time_key_counts": {"alg1": 1, "signed_curve25519": 0}}
)
# Keys should be returned in the order they were uploaded. To test, advance time
# a little, then upload a second key with an earlier key ID; it should get
# returned second.
self.reactor.advance(1)
res = self.get_success(
self.handler.upload_keys_for_user(
local_user, device_id, {"one_time_keys": {"alg1:k0": "key0"}}
)
)
self.assertDictEqual(
res, {"one_time_key_counts": {"alg1": 2, "signed_curve25519": 0}}
)
# now claim both keys back. They should be in the same order
res = self.get_success(
res2 = self.get_success(
self.handler.claim_one_time_keys(
{local_user: {device_id: {"alg1": 1}}},
self.requester,
@@ -183,27 +171,12 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
)
)
self.assertEqual(
res,
res2,
{
"failures": {},
"one_time_keys": {local_user: {device_id: {"alg1:k1": "key1"}}},
},
)
res = self.get_success(
self.handler.claim_one_time_keys(
{local_user: {device_id: {"alg1": 1}}},
self.requester,
timeout=None,
always_include_fallback_keys=False,
)
)
self.assertEqual(
res,
{
"failures": {},
"one_time_keys": {local_user: {device_id: {"alg1:k0": "key0"}}},
},
)
def test_claim_one_time_key_bulk(self) -> None:
"""Like test_claim_one_time_key but claims multiple keys in one handler call."""
@@ -363,47 +336,6 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}"
)
def test_claim_one_time_key_bulk_ordering(self) -> None:
"""Keys returned by the bulk claim call should be returned in the correct order"""
# Alice has lots of keys, uploaded in a specific order
alice = f"@alice:{self.hs.hostname}"
alice_dev = "alice_dev_1"
self.get_success(
self.handler.upload_keys_for_user(
alice,
alice_dev,
{"one_time_keys": {"alg1:k20": 20, "alg1:k21": 21, "alg1:k22": 22}},
)
)
# Advance time by 1s, to ensure that there is a difference in upload time.
self.reactor.advance(1)
self.get_success(
self.handler.upload_keys_for_user(
alice,
alice_dev,
{"one_time_keys": {"alg1:k10": 10, "alg1:k11": 11, "alg1:k12": 12}},
)
)
# Now claim some, and check we get the right ones.
claim_res = self.get_success(
self.handler.claim_one_time_keys(
{alice: {alice_dev: {"alg1": 2}}},
self.requester,
timeout=None,
always_include_fallback_keys=False,
)
)
# We should get the first-uploaded keys, even though they have later key ids.
# We should get a random set of two of k20, k21, k22.
self.assertEqual(claim_res["failures"], {})
claimed_keys = claim_res["one_time_keys"]["@alice:test"]["alice_dev_1"]
self.assertEqual(len(claimed_keys), 2)
for key_id in claimed_keys.keys():
self.assertIn(key_id, ["alg1:k20", "alg1:k21", "alg1:k22"])
def test_fallback_key(self) -> None:
local_user = "@boris:" + self.hs.hostname
device_id = "xyz"

View File

@@ -661,12 +661,9 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
)
)
with (
patch.object(
fed_client, "make_membership_event", mock_make_membership_event
),
patch.object(fed_client, "send_join", mock_send_join),
):
with patch.object(
fed_client, "make_membership_event", mock_make_membership_event
), patch.object(fed_client, "send_join", mock_send_join):
# Join and check that our join event is rejected
# (The join event is rejected because it doesn't have any signatures)
join_exc = self.get_failure(
@@ -711,12 +708,9 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
fed_handler = self.hs.get_federation_handler()
store = self.hs.get_datastores().main
with (
patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
),
patch.object(store, "is_partial_state_room", mock_is_partial_state_room),
):
with patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
), patch.object(store, "is_partial_state_room", mock_is_partial_state_room):
# Start the partial state sync.
fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id")
self.assertEqual(mock_sync_partial_state_room.call_count, 1)
@@ -766,12 +760,9 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
fed_handler = self.hs.get_federation_handler()
store = self.hs.get_datastores().main
with (
patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
),
patch.object(store, "is_partial_state_room", mock_is_partial_state_room),
):
with patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
), patch.object(store, "is_partial_state_room", mock_is_partial_state_room):
# Start the partial state sync.
fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id")
self.assertEqual(mock_sync_partial_state_room.call_count, 1)
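These hunks (and the matching one in the next file below) only switch between two spellings of the same statement: parenthesised context managers need a newer parser (officially Python 3.10+), so the comma-chained form is presumably kept for compatibility with the oldest supported Python. A toy comparison:

from contextlib import contextmanager
from typing import Iterator

@contextmanager
def managed(name: str) -> Iterator[str]:
    yield name

# Comma-chained form: valid on every supported Python 3 version.
with managed("a") as a, managed("b") as b:
    print(a, b)

# Parenthesised form: lets each manager sit on its own line, but is a
# SyntaxError on older interpreters.
with (
    managed("a") as a,
    managed("b") as b,
):
    print(a, b)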

View File

@@ -172,25 +172,20 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
)
)
with (
patch.object(
self.handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
),
patch.object(
self.handler.federation_handler.federation_client,
"send_join",
mock_send_join,
),
patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
),
patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
),
with patch.object(
self.handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
), patch.object(
self.handler.federation_handler.federation_client,
"send_join",
mock_send_join,
), patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
), patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
):
self.get_success(
self.handler.update_membership(

View File

@@ -33,7 +33,6 @@ from synapse.api.constants import (
)
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sliding_sync import (
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER,
RoomsForUserType,
RoomSyncConfig,
StateValues,
@@ -3320,32 +3319,6 @@ class RequiredStateChangesTestCase(unittest.TestCase):
),
),
),
(
"simple_retain_previous_state_keys",
"""Test adding a state key to the config and retaining a previously sent state_key""",
RequiredStateChangesTestParameters(
previous_required_state_map={"type": {"state_key1"}},
request_required_state_map={"type": {"state_key2", "state_key3"}},
state_deltas={("type", "state_key2"): "$event_id"},
expected_with_state_deltas=(
# We've added a key so we should persist the changed required state
# config.
#
# Retain `state_key1` from the `previous_required_state_map`
{"type": {"state_key1", "state_key2", "state_key3"}},
# We should see the new state_keys added
StateFilter.from_types(
[("type", "state_key2"), ("type", "state_key3")]
),
),
expected_without_state_deltas=(
{"type": {"state_key1", "state_key2", "state_key3"}},
StateFilter.from_types(
[("type", "state_key2"), ("type", "state_key3")]
),
),
),
),
(
"simple_remove_type",
"""
@@ -3751,249 +3724,6 @@ class RequiredStateChangesTestCase(unittest.TestCase):
),
),
),
(
"state_key_lazy_keep_previous_memberships_and_no_new_memberships",
"""
This test mimics a request with lazy-loading room members enabled where
we have previously sent down user2 and user3's membership events and now
we're sending down another response without any timeline events.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we should persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
StateValues.LAZY,
"@user3:test",
}
},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
expected_without_state_deltas=(
# We're not requesting any specific `EventTypes.Member` now but
# since that state hasn't changed, nothing should change (we
# should still keep track that we've sent specific
# `EventTypes.Member` before).
None,
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
),
),
(
"state_key_lazy_keep_previous_memberships_with_new_memberships",
"""
This test mimics a request with lazy-loading room members enabled where
we have previously sent down user2 and user3's membership events and now
we're sending down another response with a new event from user4.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
request_required_state_map={
EventTypes.Member: {StateValues.LAZY, "@user4:test"}
},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
#
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we also should persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
StateValues.LAZY,
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
),
expected_without_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
{
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
),
),
),
(
"state_key_expand_lazy_keep_previous_memberships",
"""
Test expanding the `required_state` to lazy-loading room members.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {"@user2:test", "@user3:test"}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
#
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we also should persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
StateValues.LAZY,
"@user3:test",
}
},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
expected_without_state_deltas=(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
{
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
),
),
(
"state_key_retract_lazy_keep_previous_memberships_no_new_memberships",
"""
Test retracting the `required_state` to no longer lazy-loading room members.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
request_required_state_map={},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Remove `EventTypes.Member` since there's been a change to that
# state, (persist the change to required state). That way next
# time, they request `EventTypes.Member`, we see that we haven't
# sent it before and send the new state. (if we were tracking
# that we sent any other state, we should still keep track
# that).
#
# This acts the same as the `simple_remove_type` test. It's
# possible that we could remember the specific `state_keys` that
# we have sent down before but this currently just acts the same
# as if a whole `type` was removed. Perhaps it's good that we
# "garbage collect" and forget what we've sent before for a
# given `type` when the client stops caring about a certain
# `type`.
{},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
expected_without_state_deltas=(
# `EventTypes.Member` is no longer requested but since that
# state hasn't changed, nothing should change (we should still
# keep track that we've sent `EventTypes.Member` before).
None,
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
),
),
(
"state_key_retract_lazy_keep_previous_memberships_with_new_memberships",
"""
Test retracting the `required_state` to no longer lazy-loading room members.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
EventTypes.Member: {
StateValues.LAZY,
"@user2:test",
"@user3:test",
}
},
request_required_state_map={EventTypes.Member: {"@user4:test"}},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
expected_with_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
#
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we also should persist the change to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
{
EventTypes.Member: {
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
),
expected_without_state_deltas=(
# Since "@user4:test" was added, we should persist the changed
# required state config.
{
EventTypes.Member: {
"@user2:test",
"@user3:test",
"@user4:test",
}
},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
),
),
),
(
"type_wildcard_with_state_key_wildcard_to_explicit_state_keys",
"""
@@ -4094,7 +3824,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
),
),
(
"explicit_state_keys_to_wildcard_state_key",
"state_key_wildcard_to_explicit_state_keys",
"""Test switching from a wildcard to explicit state keys with a concrete type""",
RequiredStateChangesTestParameters(
previous_required_state_map={
@@ -4107,18 +3837,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# request. And we need to request all of the state for that type
# because we previously only sent down a few keys.
expected_with_state_deltas=(
{"type1": {StateValues.WILDCARD, "state_key2", "state_key3"}},
{"type1": {StateValues.WILDCARD}},
StateFilter.from_types([("type1", None)]),
),
expected_without_state_deltas=(
{
"type1": {
StateValues.WILDCARD,
"state_key1",
"state_key2",
"state_key3",
}
},
{"type1": {StateValues.WILDCARD}},
StateFilter.from_types([("type1", None)]),
),
),
@@ -4134,8 +3857,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# Without `state_deltas`
changed_required_state_map, added_state_filter = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
previous_room_config=RoomSyncConfig(
timeline_limit=0,
required_state_map=test_parameters.previous_required_state_map,
),
room_sync_config=RoomSyncConfig(
timeline_limit=0,
required_state_map=test_parameters.request_required_state_map,
),
state_deltas={},
)
@@ -4153,8 +3882,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# With `state_deltas`
changed_required_state_map, added_state_filter = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
previous_room_config=RoomSyncConfig(
timeline_limit=0,
required_state_map=test_parameters.previous_required_state_map,
),
room_sync_config=RoomSyncConfig(
timeline_limit=0,
required_state_map=test_parameters.request_required_state_map,
),
state_deltas=test_parameters.state_deltas,
)
@@ -4168,121 +3903,3 @@ class RequiredStateChangesTestCase(unittest.TestCase):
test_parameters.expected_with_state_deltas[1],
"added_state_filter does not match (with state_deltas)",
)
@parameterized.expand(
[
# Test with a normal arbitrary type (no special meaning)
("arbitrary_type", "type", set()),
# Test with membership
("membership", EventTypes.Member, set()),
# Test with lazy-loading room members
("lazy_loading_membership", EventTypes.Member, {StateValues.LAZY}),
]
)
def test_limit_retained_previous_state_keys(
self,
_test_label: str,
event_type: str,
extra_state_keys: Set[str],
) -> None:
"""
Test that we limit the number of state_keys that we remember but always include
the state_keys that we've just requested.
"""
previous_required_state_map = {
event_type: {
# Prefix the state_keys we've "prev_"iously sent so they are easier to
# identify in our assertions.
f"prev_state_key{i}"
for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30)
}
| extra_state_keys
}
request_required_state_map = {
event_type: {f"state_key{i}" for i in range(50)} | extra_state_keys
}
# (function under test)
changed_required_state_map, added_state_filter = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
state_deltas={},
)
assert changed_required_state_map is not None
# We should only remember up to the maximum number of state keys
self.assertGreaterEqual(
len(changed_required_state_map[event_type]),
# Most of the time this will be `MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER` but
# because we are just naively selecting enough previous state_keys to fill
# the limit, there might be some overlap in what's added back which means we
# might have slightly less than the limit.
#
# `extra_state_keys` overlaps in the previous and requested
# `required_state_map`, so we might see this scenario.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - len(extra_state_keys),
)
# Should include all of the requested state
self.assertIncludes(
changed_required_state_map[event_type],
request_required_state_map[event_type],
)
# And the rest is filled with the previous state keys
#
# We can't assert the exact state_keys since we don't know the order so we just
# check that they all start with "prev_" and that we have the correct amount.
remaining_state_keys = (
changed_required_state_map[event_type]
- request_required_state_map[event_type]
)
self.assertGreater(
len(remaining_state_keys),
0,
)
assert all(
state_key.startswith("prev_") for state_key in remaining_state_keys
), "Remaining state_keys should be the previous state_keys"
def test_request_more_state_keys_than_remember_limit(self) -> None:
"""
Test requesting more state_keys than fit in our limit to remember from previous
requests.
"""
previous_required_state_map = {
"type": {
# Prefix the state_keys we've "prev_"iously sent so they are easier to
# identify in our assertions.
f"prev_state_key{i}"
for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30)
}
}
request_required_state_map = {
"type": {
f"state_key{i}"
# Requesting more than the MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + 20)
}
}
# Ensure that we are requesting more than the limit
self.assertGreater(
len(request_required_state_map["type"]),
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER,
)
# (function under test)
changed_required_state_map, added_state_filter = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
state_deltas={},
)
assert changed_required_state_map is not None
# Should include all of the requested state
self.assertIncludes(
changed_required_state_map["type"],
request_required_state_map["type"],
exact=True,
)
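The two tests removed above describe the retention rule for remembered state_keys: whatever the client just requested is always kept, and previously-sent keys are only retained while there is room under the cap. A hedged sketch of that rule (the constant is named in the removed import further up; its value here is illustrative):

from typing import Set

MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100  # illustrative, not the real value

def retained_state_keys(previous: Set[str], requested: Set[str]) -> Set[str]:
    # Everything just requested is always kept, even if that alone exceeds the cap.
    retained = set(requested)
    # Top up with previously-sent keys while there is still room; the selection
    # is naive, so which previous keys survive is essentially arbitrary.
    for state_key in previous:
        if len(retained) >= MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
            break
        retained.add(state_key)
    return retained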

View File

@@ -20,7 +20,7 @@
from typing import Collection, ContextManager, List, Optional
from unittest.mock import AsyncMock, Mock, patch
from parameterized import parameterized, parameterized_class
from parameterized import parameterized
from twisted.internet import defer
from twisted.test.proto_helpers import MemoryReactor
@@ -32,13 +32,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import event_from_pdu_json
from synapse.handlers.sync import (
SyncConfig,
SyncRequestKey,
SyncResult,
SyncVersion,
TimelineBatch,
)
from synapse.handlers.sync import SyncConfig, SyncRequestKey, SyncResult, SyncVersion
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@@ -64,21 +58,9 @@ def generate_request_key() -> SyncRequestKey:
return ("request_key", _request_key)
@parameterized_class(
("use_state_after",),
[
(True,),
(False,),
],
class_name_func=lambda cls,
num,
params_dict: f"{cls.__name__}_{'state_after' if params_dict['use_state_after'] else 'state'}",
)
class SyncTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler."""
use_state_after: bool
servlets = [
admin.register_servlets,
knock.register_servlets,
@@ -97,9 +79,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
def test_wait_for_sync_for_user_auth_blocking(self) -> None:
user_id1 = "@user1:test"
user_id2 = "@user2:test"
sync_config = generate_sync_config(
user_id1, use_state_after=self.use_state_after
)
sync_config = generate_sync_config(user_id1)
requester = create_requester(user_id1)
self.reactor.advance(100) # So we get not 0 time
@@ -132,9 +112,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.auth_blocking._hs_disabled = False
sync_config = generate_sync_config(
user_id2, use_state_after=self.use_state_after
)
sync_config = generate_sync_config(user_id2)
requester = create_requester(user_id2)
e = self.get_failure(
@@ -163,9 +141,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -199,9 +175,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(
user, use_state_after=self.use_state_after
),
sync_config=generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -214,9 +188,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_result.next_batch,
@@ -248,9 +220,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(
user, use_state_after=self.use_state_after
),
sync_config=generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -263,9 +233,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_config=generate_sync_config(user, device_id="dev"),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_result.next_batch,
@@ -308,7 +276,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(owner),
generate_sync_config(owner, use_state_after=self.use_state_after),
generate_sync_config(owner),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -328,9 +296,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Eve syncs.
eve_requester = create_requester(eve)
eve_sync_config = generate_sync_config(
eve, use_state_after=self.use_state_after
)
eve_sync_config = generate_sync_config(eve)
eve_sync_after_ban: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
@@ -401,7 +367,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice, use_state_after=self.use_state_after),
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -430,7 +396,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 2}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -477,7 +442,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice, use_state_after=self.use_state_after),
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -516,7 +481,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
}
},
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -554,8 +518,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
... and a filter that means we only return 1 event, represented by the dashed
horizontal lines: `S2` must be included in the `state` section on the second sync.
When `use_state_after` is enabled, we expect to see `S2` in the first sync.
"""
alice = self.register_user("alice", "password")
alice_tok = self.login(alice, "password")
@@ -566,7 +528,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice, use_state_after=self.use_state_after),
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -592,7 +554,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -606,18 +567,10 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e3_event],
)
if self.use_state_after:
# When using `state_after` we get told about s2 immediately
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
# Now send another event that points to S2, but not E3.
with self._patch_get_latest_events([s2_event]):
@@ -632,7 +585,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -646,19 +598,10 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e4_event],
)
if self.use_state_after:
# When using `state_after` we got told about s2 previously, so we
# don't again.
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
def test_state_includes_changes_on_ungappy_syncs(self) -> None:
"""Test `state` where the sync is not gappy.
@@ -695,8 +638,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
This is the last chance for us to tell the client about S2, so it *must* be
included in the response.
When `use_state_after` is enabled, we expect to see `S2` in the first sync.
"""
alice = self.register_user("alice", "password")
alice_tok = self.login(alice, "password")
@@ -707,7 +648,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice, use_state_after=self.use_state_after),
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -732,7 +673,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -744,11 +684,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e3_event],
)
if self.use_state_after:
# When using `state_after` we get told about s2 immediately
self.assertIn(s2_event, [e.event_id for e in room_sync.state.values()])
else:
self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()])
self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()])
# More events, E4 and E5
with self._patch_get_latest_events([e3_event]):
@@ -759,7 +695,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
incremental_sync = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice, use_state_after=self.use_state_after),
generate_sync_config(alice),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
@@ -774,19 +710,10 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e4_event, e5_event],
)
if self.use_state_after:
# When using `state_after` we got told about s2 previously, so we
# don't again.
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
@parameterized.expand(
[
@@ -794,8 +721,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
(True, False),
(False, True),
(True, True),
],
name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}_{p.args[1]}",
]
)
def test_archived_rooms_do_not_include_state_after_leave(
self, initial_sync: bool, empty_timeline: bool
@@ -823,7 +749,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
bob_requester,
generate_sync_config(bob, use_state_after=self.use_state_after),
generate_sync_config(bob),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -854,9 +780,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
bob_requester,
generate_sync_config(
bob,
filter_collection=FilterCollection(self.hs, filter_dict),
use_state_after=self.use_state_after,
bob, filter_collection=FilterCollection(self.hs, filter_dict)
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@@ -867,15 +791,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
if empty_timeline:
# The timeline should be empty
self.assertEqual(sync_room_result.timeline.events, [])
else:
# The last three events in the timeline should be those leading up to the
# leave
self.assertEqual(
[e.event_id for e in sync_room_result.timeline.events[-3:]],
[before_message_event, before_state_event, leave_event],
)
if empty_timeline or self.use_state_after:
# And the state should include the leave event...
self.assertEqual(
sync_room_result.state[("m.room.member", bob)].event_id, leave_event
@@ -885,6 +801,12 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_room_result.state[("test_state", "")].event_id, before_state_event
)
else:
# The last three events in the timeline should be those leading up to the
# leave
self.assertEqual(
[e.event_id for e in sync_room_result.timeline.events[-3:]],
[before_message_event, before_state_event, leave_event],
)
# ... And the state should be empty
self.assertEqual(sync_room_result.state, {})
@@ -957,7 +879,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user, use_state_after=self.use_state_after),
generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -1006,7 +928,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
private_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user2),
generate_sync_config(user2, use_state_after=self.use_state_after),
generate_sync_config(user2),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -1032,7 +954,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user, use_state_after=self.use_state_after),
generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@@ -1069,7 +991,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_d = defer.ensureDeferred(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user, use_state_after=self.use_state_after),
generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=since_token,
@@ -1124,7 +1046,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_d = defer.ensureDeferred(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user, use_state_after=self.use_state_after),
generate_sync_config(user),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=since_token,
@@ -1140,7 +1062,6 @@ def generate_sync_config(
user_id: str,
device_id: Optional[str] = "device_id",
filter_collection: Optional[FilterCollection] = None,
use_state_after: bool = False,
) -> SyncConfig:
"""Generate a sync config (with a unique request key).
@@ -1148,8 +1069,7 @@ def generate_sync_config(
user_id: user who is syncing.
device_id: device that is syncing. Defaults to "device_id".
filter_collection: filter to apply. Defaults to the default filter (ie,
return everything, with a default limit)
use_state_after: whether the `use_state_after` flag was set.
return everything, with a default limit)
"""
if filter_collection is None:
filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION
@@ -1159,138 +1079,4 @@ def generate_sync_config(
filter_collection=filter_collection,
is_guest=False,
device_id=device_id,
use_state_after=use_state_after,
)
class SyncStateAfterTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler state behavior when using `use_state_after."""
servlets = [
admin.register_servlets,
knock.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.sync_handler = self.hs.get_sync_handler()
self.store = self.hs.get_datastores().main
# AuthBlocking reads from the hs' config on initialization. We need to
# modify its config instead of the hs's.
self.auth_blocking = self.hs.get_auth_blocking()
def test_initial_sync_multiple_deltas(self) -> None:
"""Test that if multiple state deltas have happened during processing of
a full state sync we return the correct state"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
first_state = self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 1}, tok=tok
)
# Take a snapshot of the stream token, to simulate doing an initial sync
# at this point.
end_stream_token = self.hs.get_event_sources().get_current_token()
# Send some state *after* the stream token
self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 2}, tok=tok
)
# Calculating the full state will return the first state, and not the
# second.
state = self.get_success(
self.sync_handler._compute_state_delta_for_full_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
end_token=end_stream_token,
members_to_fetch=None,
timeline_state={},
joined=True,
)
)
self.assertEqual(state[("m.test_event", "")], first_state["event_id"])
def test_incremental_sync_multiple_deltas(self) -> None:
"""Test that if multiple state deltas have happened since an incremental
state sync we return the correct state"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
# Take a snapshot of the stream token, to simulate doing an incremental sync
# from this point.
since_token = self.hs.get_event_sources().get_current_token()
self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 1}, tok=tok
)
# Send some state *after* the stream token
second_state = self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 2}, tok=tok
)
end_stream_token = self.hs.get_event_sources().get_current_token()
# Calculating the incremental state will return the second state, and not the
# first.
state = self.get_success(
self.sync_handler._compute_state_delta_for_incremental_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
since_token=since_token,
end_token=end_stream_token,
members_to_fetch=None,
timeline_state={},
)
)
self.assertEqual(state[("m.test_event", "")], second_state["event_id"])
def test_incremental_sync_lazy_loaded_no_timeline(self) -> None:
"""Test that lazy-loading with an empty timeline doesn't return the full
state.
There was a bug where an empty state filter would cause the DB to return
the full state, rather than an empty set.
"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
since_token = self.hs.get_event_sources().get_current_token()
end_stream_token = self.hs.get_event_sources().get_current_token()
state = self.get_success(
self.sync_handler._compute_state_delta_for_incremental_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
since_token=since_token,
end_token=end_stream_token,
members_to_fetch=set(),
timeline_state={},
)
)
self.assertEqual(state, {})
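The two multiple-delta tests above pin down the same property from different directions: the state returned is computed as of the sync's end token, so state events sent after that token never leak into the response. A toy illustration of that property (all names and values below are invented; this is not Synapse's state-resolution code):

STATE_EVENTS = [
    # (stream position, (event type, state key), event id)
    (1, ("m.test_event", ""), "$first"),
    (3, ("m.test_event", ""), "$second"),
]

def state_at(end_token: int) -> dict:
    """Return the latest state entry per key at or before `end_token`."""
    result: dict = {}
    for position, key, event_id in STATE_EVENTS:
        if position <= end_token:
            result[key] = event_id
    return result

# Snapshot taken between the two state events: only the first is visible.
assert state_at(end_token=2) == {("m.test_event", ""): "$first"}
# Snapshot taken after both: the second wins.
assert state_at(end_token=4) == {("m.test_event", ""): "$second"}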

View File

@@ -27,7 +27,6 @@ from typing import (
Callable,
ContextManager,
Dict,
Generator,
List,
Optional,
Set,
@@ -50,10 +49,7 @@ from synapse.http.server import (
respond_with_json,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import (
LoggingContext,
make_deferred_yieldable,
)
from synapse.logging.context import LoggingContext, make_deferred_yieldable
from synapse.types import JsonDict
from tests.server import FakeChannel, make_request
@@ -203,7 +199,7 @@ def make_request_with_cancellation_test(
#
# We would like to trigger a cancellation at the first `await`, re-run the
# request and cancel at the second `await`, and so on. By patching
# `Deferred.__await__`, we can intercept `await`s, track which ones we have or
# `Deferred.__next__`, we can intercept `await`s, track which ones we have or
# have not seen, and force them to block when they wouldn't have.
# The set of previously seen `await`s.
@@ -215,7 +211,7 @@ def make_request_with_cancellation_test(
)
for request_number in itertools.count(1):
deferred_patch = Deferred__await__Patch(seen_awaits, request_number)
deferred_patch = Deferred__next__Patch(seen_awaits, request_number)
try:
with mock.patch(
@@ -254,8 +250,6 @@ def make_request_with_cancellation_test(
)
if respond_mock.called:
_log_for_request(request_number, "--- response finished ---")
# The request ran to completion and we are done with testing it.
# `respond_with_json` writes the response asynchronously, so we
@@ -317,8 +311,8 @@ def make_request_with_cancellation_test(
assert False, "unreachable" # noqa: B011
class Deferred__await__Patch:
"""A `Deferred.__await__` patch that will intercept `await`s and force them
class Deferred__next__Patch:
"""A `Deferred.__next__` patch that will intercept `await`s and force them
to block once it sees a new `await`.
When done with the patch, `unblock_awaits()` must be called to clean up after any
@@ -328,7 +322,7 @@ class Deferred__await__Patch:
Usage:
seen_awaits = set()
deferred_patch = Deferred__await__Patch(seen_awaits, 1)
deferred_patch = Deferred__next__Patch(seen_awaits, 1)
try:
with deferred_patch.patch():
# do things
@@ -341,14 +335,14 @@ class Deferred__await__Patch:
"""
Args:
seen_awaits: The set of stack traces of `await`s that have been previously
seen. When the `Deferred.__await__` patch sees a new `await`, it will add
seen. When the `Deferred.__next__` patch sees a new `await`, it will add
it to the set.
request_number: The request number to log against.
"""
self._request_number = request_number
self._seen_awaits = seen_awaits
self._original_Deferred__await__ = Deferred.__await__ # type: ignore[misc,unused-ignore]
self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore]
# The number of `await`s on `Deferred`s we have seen so far.
self.awaits_seen = 0
@@ -356,13 +350,8 @@ class Deferred__await__Patch:
# Whether we have seen a new `await` not in `seen_awaits`.
self.new_await_seen = False
# Whether to block new await points we see. This gets set to False once
# we have cancelled the request to allow things to run after
# cancellation.
self._block_new_awaits = True
# To force `await`s on resolved `Deferred`s to block, we make up a new
# unresolved `Deferred` and return it out of `Deferred.__await__` /
# unresolved `Deferred` and return it out of `Deferred.__next__` /
# `coroutine.send()`. We have to resolve it later, in case the `await`ing
# coroutine is part of some shared processing, such as `@cached`.
self._to_unblock: Dict[Deferred, Union[object, Failure]] = {}
@@ -371,15 +360,15 @@ class Deferred__await__Patch:
self._previous_stack: List[inspect.FrameInfo] = []
def patch(self) -> ContextManager[Mock]:
"""Returns a context manager which patches `Deferred.__await__`."""
"""Returns a context manager which patches `Deferred.__next__`."""
def Deferred___await__(
deferred: "Deferred[T]",
) -> Generator["Deferred[T]", None, T]:
"""Intercepts calls to `__await__`, which returns a generator
yielding deferreds that we await on.
def Deferred___next__(
deferred: "Deferred[T]", value: object = None
) -> "Deferred[T]":
"""Intercepts `await`s on `Deferred`s and rigs them to block once we have
seen enough of them.
The generator for `__await__` will normally:
`Deferred.__next__` will normally:
* return `self` if the `Deferred` is unresolved, in which case
`coroutine.send()` will return the `Deferred`, and
`_defer.inlineCallbacks` will stop running the coroutine until the
@@ -387,43 +376,9 @@ class Deferred__await__Patch:
* raise a `StopIteration(result)`, containing the result of the `await`.
* raise another exception, which will come out of the `await`.
"""
# Get the original generator.
gen = self._original_Deferred__await__(deferred)
# Run the generator, handling each iteration to see if we need to
# block.
try:
while True:
# We've hit a new await point (or the deferred has
# completed), handle it.
handle_next_iteration(deferred)
# Continue on.
yield gen.send(None)
except StopIteration as e:
# We need to convert `StopIteration` into a normal return.
return e.value
def handle_next_iteration(
deferred: "Deferred[T]",
) -> None:
"""Intercepts `await`s on `Deferred`s and rigs them to block once we have
seen enough of them.
Args:
deferred: The deferred that we've captured and are intercepting
`await` calls within.
"""
if not self._block_new_awaits:
# We're no longer blocking await points
return
self.awaits_seen += 1
stack = _get_stack(
skip_frames=2 # Ignore this function and `Deferred___await__` in stack trace
)
stack = _get_stack(skip_frames=1)
stack_hash = _hash_stack(stack)
if stack_hash not in self._seen_awaits:
@@ -434,29 +389,20 @@ class Deferred__await__Patch:
if not self.new_await_seen:
# This `await` isn't interesting. Let it proceed normally.
_log_await_stack(
stack,
self._previous_stack,
self._request_number,
"already seen",
)
# Don't log the stack. It's been seen before in a previous run.
self._previous_stack = stack
return
return self._original_Deferred___next__(deferred, value)
# We want to block at the current `await`.
if deferred.called and not deferred.paused:
# This `Deferred` already has a result. We chain a new,
# unresolved, `Deferred` to the end of this Deferred that it
# will wait on. This blocks the coroutine that did this `await`.
# This `Deferred` already has a result.
# We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait
# on. This blocks the coroutine that did this `await`.
# We queue it up for unblocking later.
new_deferred: "Deferred[T]" = Deferred()
self._to_unblock[new_deferred] = deferred.result
deferred.addBoth(lambda _: make_deferred_yieldable(new_deferred))
_log_await_stack(
stack,
self._previous_stack,
@@ -465,9 +411,7 @@ class Deferred__await__Patch:
)
self._previous_stack = stack
# Continue iterating on the deferred now that we've blocked it
# again.
return
return make_deferred_yieldable(new_deferred)
# This `Deferred` does not have a result yet.
# The `await` will block normally, so we don't have to do anything.
@@ -479,9 +423,9 @@ class Deferred__await__Patch:
)
self._previous_stack = stack
return
return self._original_Deferred___next__(deferred, value)
return mock.patch.object(Deferred, "__await__", new=Deferred___await__)
return mock.patch.object(Deferred, "__next__", new=Deferred___next__)
def unblock_awaits(self) -> None:
"""Unblocks any shared processing that we forced to block.
@@ -489,9 +433,6 @@ class Deferred__await__Patch:
Must be called when done, otherwise processing shared between multiple requests,
such as database queries started by `@cached`, will become permanently stuck.
"""
# Also disable blocking at future await points
self._block_new_awaits = False
to_unblock = self._to_unblock
self._to_unblock = {}
for deferred, result in to_unblock.items():

View File

@@ -903,10 +903,9 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
headers=Headers(
{
"Content-Type": ["application/json"],
"X-Test": ["test"],
# Define some hop-by-hop headers (try with varying casing to
# make sure we still match up the headers)
"Connection": ["close, X-fOo, X-Bar, X-baz"],
"Connection": ["close, X-fOo, X-Bar", "X-baz"],
# Should be removed because it's defined in the `Connection` header
"X-Foo": ["foo"],
"X-Bar": ["bar"],
@@ -945,17 +944,9 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
header_names = set(headers.keys())
# Make sure the response does not include the hop-by-hop headers
self.assertIncludes(
header_names,
{
b"Content-Type",
b"X-Test",
# Default headers from Twisted
b"Date",
b"Server",
},
exact=True,
)
self.assertNotIn(b"X-Foo", header_names)
self.assertNotIn(b"X-Bar", header_names)
self.assertNotIn(b"Proxy-Authorization", header_names)
# Make sure the response is as expected back on the main worker
self.assertEqual(res, {"foo": "bar"})

View File

@@ -22,21 +22,11 @@ from typing import Set
from parameterized import parameterized
from synapse.http.proxy import (
HOP_BY_HOP_HEADERS_LOWERCASE,
parse_connection_header_value,
)
from synapse.http.proxy import parse_connection_header_value
from tests.unittest import TestCase
def mix_case(s: str) -> str:
"""
Mix up the case of each character in the string (upper or lower case)
"""
return "".join(c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s))
class ProxyTests(TestCase):
@parameterized.expand(
[
@@ -54,11 +44,6 @@ class ProxyTests(TestCase):
b"keep-alive, x foo, x bar",
{"keep-alive", "x foo", "x bar"},
],
# Make sure we handle all of the hop-by-hop headers
[
mix_case(", ".join(HOP_BY_HOP_HEADERS_LOWERCASE)).encode("ascii"),
HOP_BY_HOP_HEADERS_LOWERCASE,
],
]
)
def test_parse_connection_header_value(
@@ -69,8 +54,7 @@ class ProxyTests(TestCase):
"""
Tests that the connection header value is parsed correctly
"""
self.assertIncludes(
self.assertEqual(
expected_extra_headers_to_remove,
parse_connection_header_value(connection_header_value),
exact=True,
)
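For reference, the parsing this test exercises can be modelled in a few lines: split the `Connection` value on commas and lowercase each token, so mixed-case spellings (as produced by the removed `mix_case` helper) still match later comparisons. A toy version, not the implementation in `synapse.http.proxy`:

def parse_connection_header_value_toy(value: bytes) -> set:
    # Lowercase so comparisons against header names are case-insensitive.
    return {
        token.strip().lower()
        for token in value.decode("ascii").split(",")
        if token.strip()
    }

print(parse_connection_header_value_toy(b"close, X-fOo, X-Bar"))
# -> {"close", "x-foo", "x-bar"}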

View File

@@ -120,11 +120,9 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
#
# We have seen stringy and null values for "room" in the wild, so presumably
# some of this validation was missing in the past.
with (
patch("synapse.events.validator.validate_canonicaljson"),
patch("synapse.events.validator.jsonschema.validate"),
patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"),
):
with patch("synapse.events.validator.validate_canonicaljson"), patch(
"synapse.events.validator.jsonschema.validate"
), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"):
pl_event_id = self.helper.send_state(
self.room_id,
"m.room.power_levels",

View File

@@ -381,10 +381,10 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_lazy_loading_room_members_initial_sync(self) -> None:
def test_rooms_required_state_lazy_loading_room_members(self) -> None:
"""
On initial sync, test `rooms.required_state` returns people relevant to the
timeline when lazy-loading room members, `["m.room.member","$LAZY"]`.
Test `rooms.required_state` returns people relevant to the timeline when
lazy-loading room members, `["m.room.member","$LAZY"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
@@ -432,255 +432,6 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
On incremental sync, test `rooms.required_state` returns people relevant to the
timeline when lazy-loading room members, `["m.room.member","$LAZY"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request with lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Make an incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# but since we've seen user2 in the last sync (and their membership hasn't
# changed), we should only see user4 here.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_expand_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
Test that when we expand the `required_state` to include lazy-loading room
members, it returns people relevant to the timeline.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request *without* lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Expand `required_state` and make an incremental Sliding Sync request *with*
# lazy-loading room members
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# and we haven't seen any membership before this sync so we should see both
# users.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
# Send a message so the room comes down sync.
self.helper.send(room_id1, "7", tok=user2_tok)
self.helper.send(room_id1, "8", tok=user4_tok)
self.helper.send(room_id1, "9", tok=user4_tok)
# Make another incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# but since we've seen both memberships in the last sync, they shouldn't appear
# again.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1].get("required_state", []),
set(),
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_expand_retract_expand_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
Test that when we expand the `required_state` to include lazy-loading room
members and then retract it again, memberships that were already sent down
are not sent again even if explicitly requested.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request *without* lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Expand `required_state` and make an incremental Sliding Sync request *with*
# lazy-loading room members
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# and we haven't seen any membership before this sync so we should see both
# users because we're lazy-loading the room members.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user4_tok)
# Retract `required_state` and make an incremental Sliding Sync request
# requesting a few memberships
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.ME],
[EventTypes.Member, user2_id],
]
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# We've seen user2's membership in the last sync so we shouldn't see it here
# even though it's requested. We should only see user1's membership.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)
def test_rooms_required_state_me(self) -> None:
"""
Test `rooms.required_state` correctly handles $ME.
@@ -810,7 +561,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.helper.leave(room_id1, user3_id, tok=user3_tok)
# Make an incremental Sliding Sync request
# Make the Sliding Sync request with lazy loading for the room members
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
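The comments in the removed tests describe the lazy-loading contract in words: with `["m.room.member", "$LAZY"]`, only memberships of users who sent events in the returned timeline come down, and on incremental syncs memberships already sent to that connection are skipped (ignoring the extra wrinkle that a changed membership would be resent). A toy model of that bookkeeping, purely illustrative and not the sliding-sync implementation:

def lazy_memberships_to_send(timeline_senders: set, already_sent: set) -> set:
    """Memberships relevant to the timeline that this connection hasn't seen yet."""
    return timeline_senders - already_sent

already_sent: set = set()

# Initial sync: user2 and user4 sent the last timeline events, so both come down.
first = lazy_memberships_to_send({"@user2:test", "@user4:test"}, already_sent)
already_sent |= first
print(first)   # {"@user2:test", "@user4:test"}

# Incremental sync: user2's membership was already sent, so only user4's returns.
second = lazy_memberships_to_send({"@user2:test", "@user4:test"}, already_sent)
print(second)  # {"@user4:test"}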

View File

@@ -58,7 +58,6 @@ import twisted
from twisted.enterprise import adbapi
from twisted.internet import address, tcp, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
@@ -74,7 +73,6 @@ from twisted.internet.interfaces import (
IReactorPluggableNameResolver,
IReactorTime,
IResolverSimple,
ITCPTransport,
ITransport,
)
from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory
@@ -782,7 +780,7 @@ def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]:
return clock, hs_clock
@implementer(ITCPTransport)
@implementer(ITransport)
@attr.s(cmp=False, auto_attribs=True)
class FakeTransport:
"""
@@ -811,12 +809,12 @@ class FakeTransport:
will get called back for connectionLost() notifications etc.
"""
_peer_address: Union[IPv4Address, IPv6Address] = attr.Factory(
_peer_address: IAddress = attr.Factory(
lambda: address.IPv4Address("TCP", "127.0.0.1", 5678)
)
"""The value to be returned by getPeer"""
_host_address: Union[IPv4Address, IPv6Address] = attr.Factory(
_host_address: IAddress = attr.Factory(
lambda: address.IPv4Address("TCP", "127.0.0.1", 1234)
)
"""The value to be returned by getHost"""
@@ -828,10 +826,10 @@ class FakeTransport:
producer: Optional[IPushProducer] = None
autoflush: bool = True
def getPeer(self) -> Union[IPv4Address, IPv6Address]:
def getPeer(self) -> IAddress:
return self._peer_address
def getHost(self) -> Union[IPv4Address, IPv6Address]:
def getHost(self) -> IAddress:
return self._host_address
def loseConnection(self) -> None:
@@ -941,51 +939,6 @@ class FakeTransport:
logger.info("FakeTransport: Buffer now empty, completing disconnect")
self.disconnected = True
## ITCPTransport methods. ##
def loseWriteConnection(self) -> None:
"""
Half-close the write side of a TCP connection.
If the protocol instance this is attached to provides
IHalfCloseableProtocol, it will get notified when the operation is
done. When closing write connection, as with loseConnection this will
only happen when buffer has emptied and there is no registered
producer.
"""
raise NotImplementedError()
def getTcpNoDelay(self) -> bool:
"""
Return if C{TCP_NODELAY} is enabled.
"""
return False
def setTcpNoDelay(self, enabled: bool) -> None:
"""
Enable/disable C{TCP_NODELAY}.
Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
sent sooner, possibly at the expense of overall throughput.
"""
# Ignore setting this.
def getTcpKeepAlive(self) -> bool:
"""
Return if C{SO_KEEPALIVE} is enabled.
"""
return False
def setTcpKeepAlive(self, enabled: bool) -> None:
"""
Enable/disable C{SO_KEEPALIVE}.
Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
is otherwise idle, usually once every two hours. They are intended
to allow detection of lost peers in a non-infinite amount of time.
"""
# Ignore setting this.
def connect_client(
reactor: ThreadedMemoryReactorClock, client_id: int

View File

@@ -1465,25 +1465,20 @@ class GetCurrentStateDeltaMembershipChangesForUserFederationTestCase(
)
)
with (
patch.object(
self.room_member_handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
),
patch.object(
self.room_member_handler.federation_handler.federation_client,
"send_join",
mock_send_join,
),
patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
),
patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
),
with patch.object(
self.room_member_handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
), patch.object(
self.room_member_handler.federation_handler.federation_client,
"send_join",
mock_send_join,
), patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
), patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
):
self.get_success(
self.room_member_handler.update_membership(

View File

@@ -320,19 +320,12 @@ class ConcurrentlyExecuteTest(TestCase):
await concurrently_execute(callback, [1], 2)
except _TestException as e:
tb = traceback.extract_tb(e.__traceback__)
# Remove twisted internals from the stack, as we don't care
# about the precise details.
tb = traceback.StackSummary(
t for t in tb if "/twisted/" not in t.filename
)
# we expect to see "caller", "concurrently_execute" at the top of the stack
# we expect to see "caller", "concurrently_execute", "callback",
# and some magic from inside ensureDeferred that happens when .fail
# is called.
self.assertEqual(tb[0].name, "caller")
self.assertEqual(tb[1].name, "concurrently_execute")
# ... some stack frames from the implementation of `concurrently_execute` ...
# and at the bottom of the stack we expect to see "callback"
self.assertEqual(tb[-1].name, "callback")
self.assertEqual(tb[-2].name, "callback")
else:
self.fail("No exception thrown")

View File

@@ -109,13 +109,10 @@ class TestDependencyChecker(TestCase):
def test_checks_ignore_dev_dependencies(self) -> None:
"""Both generic and per-extra checks should ignore dev dependencies."""
with (
patch(
"synapse.util.check_dependencies.metadata.requires",
return_value=["dummypkg >= 1; extra == 'mypy'"],
),
patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}),
):
with patch(
"synapse.util.check_dependencies.metadata.requires",
return_value=["dummypkg >= 1; extra == 'mypy'"],
), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
# We're testing that none of these calls raise.
with self.mock_installed_package(None):
check_requirements()
@@ -144,13 +141,10 @@ class TestDependencyChecker(TestCase):
def test_check_for_extra_dependencies(self) -> None:
"""Complain if a package required for an extra is missing or old."""
with (
patch(
"synapse.util.check_dependencies.metadata.requires",
return_value=["dummypkg >= 1; extra == 'cool-extra'"],
),
patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}),
):
with patch(
"synapse.util.check_dependencies.metadata.requires",
return_value=["dummypkg >= 1; extra == 'cool-extra'"],
), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
with self.mock_installed_package(None):
self.assertRaises(DependencyException, check_requirements, "cool-extra")
with self.mock_installed_package(old):

View File

@@ -28,55 +28,53 @@ class WheelTimerTestCase(unittest.TestCase):
def test_single_insert_fetch(self) -> None:
wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
wheel.insert(100, "1", 150)
obj = object()
wheel.insert(100, obj, 150)
self.assertListEqual(wheel.fetch(101), [])
self.assertListEqual(wheel.fetch(110), [])
self.assertListEqual(wheel.fetch(120), [])
self.assertListEqual(wheel.fetch(130), [])
self.assertListEqual(wheel.fetch(149), [])
self.assertListEqual(wheel.fetch(156), ["1"])
self.assertListEqual(wheel.fetch(156), [obj])
self.assertListEqual(wheel.fetch(170), [])
def test_multi_insert(self) -> None:
wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
wheel.insert(100, "1", 150)
wheel.insert(105, "2", 130)
wheel.insert(106, "3", 160)
obj1 = object()
obj2 = object()
obj3 = object()
wheel.insert(100, obj1, 150)
wheel.insert(105, obj2, 130)
wheel.insert(106, obj3, 160)
self.assertListEqual(wheel.fetch(110), [])
self.assertListEqual(wheel.fetch(135), ["2"])
self.assertListEqual(wheel.fetch(135), [obj2])
self.assertListEqual(wheel.fetch(149), [])
self.assertListEqual(wheel.fetch(158), ["1"])
self.assertListEqual(wheel.fetch(158), [obj1])
self.assertListEqual(wheel.fetch(160), [])
self.assertListEqual(wheel.fetch(200), ["3"])
self.assertListEqual(wheel.fetch(200), [obj3])
self.assertListEqual(wheel.fetch(210), [])
def test_insert_past(self) -> None:
wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
wheel.insert(100, "1", 50)
self.assertListEqual(wheel.fetch(120), ["1"])
obj = object()
wheel.insert(100, obj, 50)
self.assertListEqual(wheel.fetch(120), [obj])
def test_insert_past_multi(self) -> None:
wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
wheel.insert(100, "1", 150)
wheel.insert(100, "2", 140)
wheel.insert(100, "3", 50)
self.assertListEqual(wheel.fetch(110), ["3"])
obj1 = object()
obj2 = object()
obj3 = object()
wheel.insert(100, obj1, 150)
wheel.insert(100, obj2, 140)
wheel.insert(100, obj3, 50)
self.assertListEqual(wheel.fetch(110), [obj3])
self.assertListEqual(wheel.fetch(120), [])
self.assertListEqual(wheel.fetch(147), ["2"])
self.assertListEqual(wheel.fetch(200), ["1"])
self.assertListEqual(wheel.fetch(147), [obj2])
self.assertListEqual(wheel.fetch(200), [obj1])
self.assertListEqual(wheel.fetch(240), [])
def test_multi_insert_then_past(self) -> None:
wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
wheel.insert(100, "1", 150)
wheel.insert(100, "2", 160)
wheel.insert(100, "3", 155)
self.assertListEqual(wheel.fetch(110), [])
self.assertListEqual(wheel.fetch(158), ["1"])
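For readers unfamiliar with the structure under test: a wheel timer groups deadlines into fixed-size buckets and only fires a bucket once `now` has moved past it, which is why `fetch(160)` above returns nothing for an entry due at 160. A toy version that reproduces the behaviour these assertions rely on (Synapse's real `WheelTimer` is more involved):

class ToyWheelTimer:
    def __init__(self, bucket_size: int) -> None:
        self.bucket_size = bucket_size
        self.buckets: dict = {}

    def insert(self, now: int, obj: object, then: int) -> None:
        # Round up to the next bucket boundary; entries scheduled in the past
        # land in the bucket that `now` itself falls into.
        key = max(then, now) // self.bucket_size + 1
        self.buckets.setdefault(key, []).append(obj)

    def fetch(self, now: int) -> list:
        now_key = now // self.bucket_size
        due = sorted(key for key in self.buckets if key <= now_key)
        out: list = []
        for key in due:
            out.extend(self.buckets.pop(key))
        return out

wheel = ToyWheelTimer(bucket_size=5)
obj = object()
wheel.insert(106, obj, 160)
print(wheel.fetch(160))  # [] -- the bucket containing 160 hasn't fully elapsed yet
print(wheel.fetch(200))  # [obj]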

View File

@@ -1,5 +1,5 @@
[tox]
envlist = py39, py310, py311, py312, py313
envlist = py37, py38, py39, py310
# we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208
minversion = 2.3.2