Compare commits


3 Commits

Author          SHA1         Message                                            Date
Andrew Morgan   d50c5996b9   changelog                                          2024-01-26 13:32:13 -07:00
Andrew Morgan   dfb5598c45   temporarily build docs on PRs, but don't deploy    2024-01-26 13:31:23 -07:00
Andrew Morgan   3e63e90e8d   Pin Python version in dev-docs CI to 3.11          2024-01-26 13:28:02 -07:00
165 changed files with 1683 additions and 3368 deletions

View File

@@ -30,7 +30,7 @@ jobs:
run: docker buildx inspect
- name: Install Cosign
uses: sigstore/cosign-installer@v3.4.0
uses: sigstore/cosign-installer@v3.3.0
- name: Checkout repository
uses: actions/checkout@v4

View File

@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d # v3.0.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}

View File

@@ -9,6 +9,7 @@ on:
- 'release-v*'
# stable docs
- master
pull_request:
workflow_dispatch:
@@ -79,12 +80,12 @@ jobs:
cp book/welcome_and_overview.html book/index.html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ needs.pre.outputs.branch-version }}
# - name: Deploy to gh pages
# uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
# with:
# github_token: ${{ secrets.GITHUB_TOKEN }}
# publish_dir: ./book
# destination_dir: ./${{ needs.pre.outputs.branch-version }}
################################################################################
pages-devdocs:
@@ -98,7 +99,7 @@ jobs:
- name: "Set up Sphinx"
uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
python-version: "3.11"
poetry-version: "1.3.2"
groups: "dev-docs"
extras: ""
@@ -109,9 +110,9 @@ jobs:
poetry run make html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./dev-docs/_build/html
destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
# - name: Deploy to gh pages
# uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
# with:
# github_token: ${{ secrets.GITHUB_TOKEN }}
# publish_dir: ./dev-docs/_build/html
# destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}

View File

@@ -226,7 +226,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -22,7 +22,7 @@ jobs:
integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
steps:
- uses: dorny/paths-filter@v3
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')

View File

@@ -207,7 +207,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -1,193 +1,3 @@
# Synapse 1.104.0 (2024-04-02)
### Bugfixes
- Fix regression when using OIDC provider. Introduced in v1.104.0rc1. ([\#17031](https://github.com/element-hq/synapse/issues/17031))
# Synapse 1.104.0rc1 (2024-03-26)
### Features
- Add an OIDC config to specify extra parameters for the authorization grant URL. It can be useful to pass an ACR value, for example. ([\#16971](https://github.com/element-hq/synapse/issues/16971))
- Add support for OIDC provider returning JWT. ([\#16972](https://github.com/element-hq/synapse/issues/16972), [\#17031](https://github.com/element-hq/synapse/issues/17031))
### Bugfixes
- Fix a bug which meant that, under certain circumstances, we might never retry sending events or to-device messages over federation after a failure. ([\#16925](https://github.com/element-hq/synapse/issues/16925))
- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16949](https://github.com/element-hq/synapse/issues/16949))
- Fix case in which `m.fully_read` marker would not get updated. Contributed by @SpiritCroc. ([\#16990](https://github.com/element-hq/synapse/issues/16990))
- Fix bug which did not retract a user's pending knocks at rooms when their account was deactivated. Contributed by @hanadi92. ([\#17010](https://github.com/element-hq/synapse/issues/17010))
### Updates to the Docker image
- Updated `start.py` to generate config using the correct user ID when running as root (fixes [\#16824](https://github.com/element-hq/synapse/issues/16824), [\#15202](https://github.com/element-hq/synapse/issues/15202)). ([\#16978](https://github.com/element-hq/synapse/issues/16978))
### Improved Documentation
- Add a query to force a refresh of a remote user's device list to the "Useful SQL for Admins" documentation page. ([\#16892](https://github.com/element-hq/synapse/issues/16892))
- Minor grammatical corrections to the upgrade documentation. ([\#16965](https://github.com/element-hq/synapse/issues/16965))
- Fix the sort order for the documentation version picker, so that newer releases appear above older ones. ([\#16966](https://github.com/element-hq/synapse/issues/16966))
- Remove recommendation for a specific poetry version from contributing guide. ([\#17002](https://github.com/element-hq/synapse/issues/17002))
### Internal Changes
- Improve lock performance when a lot of locks are all waiting for a single lock to be released. ([\#16840](https://github.com/element-hq/synapse/issues/16840))
- Update power level default for public rooms. ([\#16907](https://github.com/element-hq/synapse/issues/16907))
- Improve event validation. ([\#16908](https://github.com/element-hq/synapse/issues/16908))
- Multi-worker-docker-container: disable log buffering. ([\#16919](https://github.com/element-hq/synapse/issues/16919))
- Refactor state delta calculation in `/sync` handler. ([\#16929](https://github.com/element-hq/synapse/issues/16929))
- Clarify docs for some room state functions. ([\#16950](https://github.com/element-hq/synapse/issues/16950))
- Specify IP subnets in canonical form. ([\#16953](https://github.com/element-hq/synapse/issues/16953))
- As done for SAML mapping provider, let's pass the module API to the OIDC one so the mapper can do more logic in its code. ([\#16974](https://github.com/element-hq/synapse/issues/16974))
- Allow containers building on top of Synapse's Complement container to use the included PostgreSQL cluster. ([\#16985](https://github.com/element-hq/synapse/issues/16985))
- Raise poetry-core version cap to 1.9.0. ([\#16986](https://github.com/element-hq/synapse/issues/16986))
- Patch the db conn pool sooner in tests. ([\#17017](https://github.com/element-hq/synapse/issues/17017))
### Updates to locked dependencies
* Bump anyhow from 1.0.80 to 1.0.81. ([\#17009](https://github.com/element-hq/synapse/issues/17009))
* Bump black from 23.10.1 to 24.2.0. ([\#16936](https://github.com/element-hq/synapse/issues/16936))
* Bump cryptography from 41.0.7 to 42.0.5. ([\#16958](https://github.com/element-hq/synapse/issues/16958))
* Bump dawidd6/action-download-artifact from 3.1.1 to 3.1.2. ([\#16960](https://github.com/element-hq/synapse/issues/16960))
* Bump dawidd6/action-download-artifact from 3.1.2 to 3.1.4. ([\#17008](https://github.com/element-hq/synapse/issues/17008))
* Bump jinja2 from 3.1.2 to 3.1.3. ([\#17005](https://github.com/element-hq/synapse/issues/17005))
* Bump log from 0.4.20 to 0.4.21. ([\#16977](https://github.com/element-hq/synapse/issues/16977))
* Bump mypy from 1.5.1 to 1.8.0. ([\#16901](https://github.com/element-hq/synapse/issues/16901))
* Bump netaddr from 0.9.0 to 1.2.1. ([\#17006](https://github.com/element-hq/synapse/issues/17006))
* Bump pydantic from 2.6.0 to 2.6.4. ([\#17004](https://github.com/element-hq/synapse/issues/17004))
* Bump pyo3 from 0.20.2 to 0.20.3. ([\#16962](https://github.com/element-hq/synapse/issues/16962))
* Bump ruff from 0.1.14 to 0.3.2. ([\#16994](https://github.com/element-hq/synapse/issues/16994))
* Bump serde from 1.0.196 to 1.0.197. ([\#16963](https://github.com/element-hq/synapse/issues/16963))
* Bump serde_json from 1.0.113 to 1.0.114. ([\#16961](https://github.com/element-hq/synapse/issues/16961))
* Bump types-jsonschema from 4.21.0.20240118 to 4.21.0.20240311. ([\#17007](https://github.com/element-hq/synapse/issues/17007))
* Bump types-psycopg2 from 2.9.21.16 to 2.9.21.20240311. ([\#16995](https://github.com/element-hq/synapse/issues/16995))
* Bump types-pyopenssl from 23.3.0.0 to 24.0.0.20240311. ([\#17003](https://github.com/element-hq/synapse/issues/17003))
# Synapse 1.103.0 (2024-03-19)
No significant changes since 1.103.0rc1.
# Synapse 1.103.0rc1 (2024-03-12)
### Features
- Add a new [List Accounts v3](https://element-hq.github.io/synapse/v1.103/admin_api/user_admin_api.html#list-accounts-v3) Admin API with improved deactivated user filtering capabilities. ([\#16874](https://github.com/element-hq/synapse/issues/16874))
- Include `Retry-After` header by default per [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041). Contributed by @clokep. ([\#16947](https://github.com/element-hq/synapse/issues/16947))
### Bugfixes
- Fix joining remote rooms when a module uses the `on_new_event` callback. This callback may now pass partial state events instead of the full state for remote rooms. Introduced in v1.76.0. ([\#16973](https://github.com/element-hq/synapse/issues/16973))
- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. Contributed by @ggogel. ([\#16968](https://github.com/element-hq/synapse/issues/16968))
### Improved Documentation
- Add HAProxy example for single port operation to reverse proxy documentation. Contributed by Georg Pfuetzenreuter (@tacerus). ([\#16768](https://github.com/element-hq/synapse/issues/16768))
- Improve the documentation around running Complement tests with new configuration parameters. ([\#16946](https://github.com/element-hq/synapse/issues/16946))
- Add docs on upgrading from a very old version. ([\#16951](https://github.com/element-hq/synapse/issues/16951))
### Updates to locked dependencies
* Bump JasonEtco/create-an-issue from 2.9.1 to 2.9.2. ([\#16934](https://github.com/element-hq/synapse/issues/16934))
* Bump anyhow from 1.0.79 to 1.0.80. ([\#16935](https://github.com/element-hq/synapse/issues/16935))
* Bump dawidd6/action-download-artifact from 3.0.0 to 3.1.1. ([\#16933](https://github.com/element-hq/synapse/issues/16933))
* Bump furo from 2023.9.10 to 2024.1.29. ([\#16939](https://github.com/element-hq/synapse/issues/16939))
* Bump pyopenssl from 23.3.0 to 24.0.0. ([\#16937](https://github.com/element-hq/synapse/issues/16937))
* Bump types-netaddr from 0.10.0.20240106 to 1.2.0.20240219. ([\#16938](https://github.com/element-hq/synapse/issues/16938))
# Synapse 1.102.0 (2024-03-05)
### Bugfixes
- Revert https://github.com/element-hq/synapse/pull/16756, which caused incorrect notification counts on mobile clients since v1.100.0. ([\#16979](https://github.com/element-hq/synapse/issues/16979))
# Synapse 1.102.0rc1 (2024-02-20)
### Features
- A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin. ([\#16881](https://github.com/element-hq/synapse/issues/16881))
### Bugfixes
- Do not send multiple concurrent requests for keys for the same server. ([\#16894](https://github.com/element-hq/synapse/issues/16894))
- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. ([\#16903](https://github.com/element-hq/synapse/issues/16903))
- Always prefer unthreaded receipt when >1 exist ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)). ([\#16927](https://github.com/element-hq/synapse/issues/16927))
### Improved Documentation
- Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187. ([\#16857](https://github.com/element-hq/synapse/issues/16857))
### Internal Changes
- Don't invalidate the entire event cache when we purge history. ([\#16905](https://github.com/element-hq/synapse/issues/16905))
- Add experimental config option to not send device list updates for specific users. ([\#16909](https://github.com/element-hq/synapse/issues/16909))
- Fix incorrect docker hub link in release script. ([\#16910](https://github.com/element-hq/synapse/issues/16910))
### Updates to locked dependencies
* Bump attrs from 23.1.0 to 23.2.0. ([\#16899](https://github.com/element-hq/synapse/issues/16899))
* Bump bcrypt from 4.0.1 to 4.1.2. ([\#16900](https://github.com/element-hq/synapse/issues/16900))
* Bump pygithub from 2.1.1 to 2.2.0. ([\#16902](https://github.com/element-hq/synapse/issues/16902))
* Bump sentry-sdk from 1.40.0 to 1.40.3. ([\#16898](https://github.com/element-hq/synapse/issues/16898))
# Synapse 1.101.0 (2024-02-13)
### Bugfixes
- Fix performance regression when fetching auth chains from the DB. Introduced in v1.100.0. ([\#16893](https://github.com/element-hq/synapse/issues/16893))
# Synapse 1.101.0rc1 (2024-02-06)
### Improved Documentation
- Fix broken links in the documentation. ([\#16853](https://github.com/element-hq/synapse/issues/16853))
- Update MacOS installation instructions to mention that libicu is optional. ([\#16854](https://github.com/element-hq/synapse/issues/16854))
- The version picker now correctly lists versions after `v1.98.0`. ([\#16880](https://github.com/element-hq/synapse/issues/16880))
### Internal Changes
- Add support for stabilised [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981) that adds a `recurse` parameter on the `/relations` API. ([\#16842](https://github.com/element-hq/synapse/issues/16842))
### Updates to locked dependencies
* Bump dorny/paths-filter from 2 to 3. ([\#16869](https://github.com/element-hq/synapse/issues/16869))
* Bump gitpython from 3.1.40 to 3.1.41. ([\#16850](https://github.com/element-hq/synapse/issues/16850))
* Bump hiredis from 2.2.3 to 2.3.2. ([\#16862](https://github.com/element-hq/synapse/issues/16862))
* Bump jsonschema from 4.20.0 to 4.21.1. ([\#16887](https://github.com/element-hq/synapse/issues/16887))
* Bump lxml-stubs from 0.4.0 to 0.5.1. ([\#16885](https://github.com/element-hq/synapse/issues/16885))
* Bump mypy-zope from 1.0.1 to 1.0.3. ([\#16865](https://github.com/element-hq/synapse/issues/16865))
* Bump phonenumbers from 8.13.26 to 8.13.29. ([\#16868](https://github.com/element-hq/synapse/issues/16868))
* Bump pydantic from 2.5.3 to 2.6.0. ([\#16888](https://github.com/element-hq/synapse/issues/16888))
* Bump sentry-sdk from 1.39.1 to 1.40.0. ([\#16889](https://github.com/element-hq/synapse/issues/16889))
* Bump serde from 1.0.195 to 1.0.196. ([\#16867](https://github.com/element-hq/synapse/issues/16867))
* Bump serde_json from 1.0.111 to 1.0.113. ([\#16866](https://github.com/element-hq/synapse/issues/16866))
* Bump sigstore/cosign-installer from 3.3.0 to 3.4.0. ([\#16890](https://github.com/element-hq/synapse/issues/16890))
* Bump types-pillow from 10.1.0.2 to 10.2.0.20240125. ([\#16864](https://github.com/element-hq/synapse/issues/16864))
* Bump types-requests from 2.31.0.10 to 2.31.0.20240125. ([\#16886](https://github.com/element-hq/synapse/issues/16886))
* Bump types-setuptools from 69.0.0.0 to 69.0.0.20240125. ([\#16863](https://github.com/element-hq/synapse/issues/16863))
# Synapse 1.100.0 (2024-01-30)
No significant changes since 1.100.0rc3.
# Synapse 1.100.0rc3 (2024-01-24)
### Bugfixes

Cargo.lock (generated, 48 lines changed)
View File

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.81"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
[[package]]
name = "arc-swap"
@@ -138,9 +138,9 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.21"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "memchr"
@@ -186,12 +186,6 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "portable-atomic"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "proc-macro2"
version = "1.0.76"
@@ -203,9 +197,9 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.20.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53bdbb96d49157e65d45cc287af5f32ffadd5f4761438b527b055fb0d4bb8233"
checksum = "9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0"
dependencies = [
"anyhow",
"cfg-if",
@@ -213,7 +207,6 @@ dependencies = [
"libc",
"memoffset",
"parking_lot",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
@@ -222,9 +215,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.20.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "deaa5745de3f5231ce10517a1f5dd97d53e5a2fd77aa6b5842292085831d48d7"
checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be"
dependencies = [
"once_cell",
"target-lexicon",
@@ -232,9 +225,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.20.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b42531d03e08d4ef1f6e85a2ed422eb678b8cd62b762e53891c05faf0d4afa"
checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1"
dependencies = [
"libc",
"pyo3-build-config",
@@ -253,9 +246,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.20.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7305c720fa01b8055ec95e484a6eca7a83c841267f0dd5280f0c8b8551d2c158"
checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -265,13 +258,12 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
version = "0.20.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c7e9b68bb9c3149c5b0cade5d07f953d6d125eb4337723c4ccdb665f1f96185"
checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f"
dependencies = [
"heck",
"proc-macro2",
"pyo3-build-config",
"quote",
"syn",
]
@@ -347,18 +339,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.197"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.197"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
"proc-macro2",
"quote",
@@ -367,9 +359,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.114"
version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
"itoa",
"ryu",

changelog.d/16856.doc (new file, 1 line changed)
View File

@@ -0,0 +1 @@
Fix building the developer documentation on the `develop` branch.

View File

@@ -1 +0,0 @@
Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations.

View File

@@ -1 +0,0 @@
Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations.

View File

@@ -1 +0,0 @@
Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations.

View File

@@ -1 +0,0 @@
Add support for moving `/pushrules` off of main process.

View File

@@ -1 +0,0 @@
Add support for moving `/pushrules` off of main process.

View File

@@ -1 +0,0 @@
Refactor auth chain fetching to reduce duplication.

View File

@@ -1 +0,0 @@
Improve database performance by adding a missing index to `access_tokens.refresh_token_id`.

View File

@@ -1 +0,0 @@
Improve database performance by reducing number of receipts fetched when sending push notifications.

View File

@@ -1 +0,0 @@
Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible.

debian/changelog (vendored, 54 lines changed)
View File

@@ -1,57 +1,3 @@
matrix-synapse-py3 (1.104.0) stable; urgency=medium
* New Synapse release 1.104.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Apr 2024 17:15:45 +0100
matrix-synapse-py3 (1.104.0~rc1) stable; urgency=medium
* New Synapse release 1.104.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Mar 2024 11:48:58 +0000
matrix-synapse-py3 (1.103.0) stable; urgency=medium
* New Synapse release 1.103.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Mar 2024 12:24:36 +0000
matrix-synapse-py3 (1.103.0~rc1) stable; urgency=medium
* New Synapse release 1.103.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Mar 2024 15:02:56 +0000
matrix-synapse-py3 (1.102.0) stable; urgency=medium
* New Synapse release 1.102.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Mar 2024 14:47:03 +0000
matrix-synapse-py3 (1.102.0~rc1) stable; urgency=medium
* New Synapse release 1.102.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Feb 2024 15:50:36 +0000
matrix-synapse-py3 (1.101.0) stable; urgency=medium
* New Synapse release 1.101.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Feb 2024 10:45:35 +0000
matrix-synapse-py3 (1.101.0~rc1) stable; urgency=medium
* New Synapse release 1.101.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Feb 2024 16:02:02 +0000
matrix-synapse-py3 (1.100.0) stable; urgency=medium
* New Synapse release 1.100.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Jan 2024 16:58:19 +0000
matrix-synapse-py3 (1.100.0~rc3) stable; urgency=medium
* New Synapse release 1.100.0rc3.

View File

@@ -30,14 +30,3 @@ Consult `scripts-dev/complement.sh` in the repository root for a real example.
[complement]: https://github.com/matrix-org/complement
[complementEnv]: https://github.com/matrix-org/complement/pull/382
## How to modify homeserver.yaml for Complement tests
It's common for MSCs to be gated behind a feature flag like this:
```yaml
experimental_features:
faster_joins: true
```
To modify this for the Complement image, modify `./conf/workers-shared-extra.yaml.j2`. Despite the name,
this will affect non-worker mode as well. Remember to _rebuild_ the image (so don't use `-e` if using
`complement.sh`).

View File

@@ -1,7 +1,7 @@
[program:postgres]
command=/usr/local/bin/prefix-log gosu postgres postgres
# Only start if START_POSTGRES=true
# Only start if START_POSTGRES=1
autostart=%(ENV_START_POSTGRES)s
# Lower priority number = starts first

View File

@@ -32,9 +32,8 @@ case "$SYNAPSE_COMPLEMENT_DATABASE" in
;;
sqlite|"")
# Set START_POSTGRES to false unless it has already been set
# (i.e. by another container image inheriting our own).
export START_POSTGRES=${START_POSTGRES:-false}
# Configure supervisord not to start Postgres, as we don't need it
export START_POSTGRES=false
;;
*)

View File

@@ -310,13 +310,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"shared_extra_conf": {},
"worker_extra_conf": "",
},
"push_rules": {
"app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/"],
"shared_extra_conf": {},
"worker_extra_conf": "",
},
}
# Templates for sections that may be inserted multiple times in config files
@@ -408,7 +401,6 @@ def add_worker_roles_to_shared_config(
"receipts",
"to_device",
"typing",
"push_rules",
]
# Worker-type specific sharding config. Now a single worker can fulfill multiple

View File

@@ -7,9 +7,6 @@
# prefix-log command [args...]
#
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
# confusion due to interleaving of the different processes.
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1)
exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2)
exec "$@"

View File

@@ -160,6 +160,11 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
subprocess.run(["chown", ownership, data_dir], check=True)
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
if not os.path.exists(log_config_file):
@@ -184,15 +189,9 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--generate-config",
"--open-private-ports",
]
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
subprocess.run(["chown", ownership, data_dir], check=True)
args = ["gosu", ownership] + args
# log("running %s" % (args, ))
subprocess.run(args, check=True)
flush_buffers()
os.execv(sys.executable, args)
def main(args: List[str], environ: MutableMapping[str, str]) -> None:

View File

@@ -913,7 +913,7 @@ With all that being said, if you still want to try and recover the room:
them handle rejoining themselves.
4. If `new_room_user_id` was given, a 'Content Violation' will have been
created. Consider whether you want to delete that room.
created. Consider whether you want to delete that roomm.
# Make Room Admin API

View File

@@ -164,7 +164,6 @@ Body parameters:
Other allowed options are: `bot` and `support`.
## List Accounts
### List Accounts (V2)
This API returns all local user accounts.
By default, the response is ordered by ascending user ID.
@@ -288,19 +287,6 @@ The following fields are returned in the JSON response body:
*Added in Synapse 1.93:* the `locked` query parameter and response field.
### List Accounts (V3)
This API returns all local user accounts (see v2). In contrast to v2, the query parameter `deactivated` is handled differently.
```
GET /_synapse/admin/v3/users
```
**Parameters**
- `deactivated` - Optional flag to filter deactivated users. If `true`, only deactivated users are returned.
If `false`, deactivated users are excluded from the query. When the flag is absent (the default),
users are not filtered by deactivation status.
## Query current sessions for a user
This API returns information about the active sessions for a specific user.
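For reference, a minimal sketch of calling the List Accounts (V3) endpoint documented above from Python. The homeserver URL and admin token are placeholders, and the response shape (a `users` list with `name` fields) is assumed to match the v2 List Accounts API.

```python
import requests

HOMESERVER = "https://matrix.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"       # placeholder

# Ask only for deactivated local accounts; omit the parameter to return all
# users regardless of deactivation status.
resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v3/users",
    params={"deactivated": "true"},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
for user in resp.json().get("users", []):
    print(user["name"])
```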

View File

@@ -68,7 +68,7 @@ Of their installation methods, we recommend
```shell
pip install --user pipx
pipx install poetry
pipx install poetry==1.5.1 # Problems with Poetry 1.6, see https://github.com/matrix-org/synapse/issues/16147
```
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
@@ -329,7 +329,7 @@ This configuration should generally cover your needs.
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/vector-im/sytest/blob/develop/docker/README.md).
## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).

View File

@@ -142,10 +142,6 @@ Called after sending an event into a room. The module is passed the event, as we
as the state of the room _after_ the event. This means that if the event is a state event,
it will be included in this state.
The state map may not be complete if Synapse hasn't yet loaded the full state
of the room. This can happen for events in rooms that were just joined from
a remote server.
Note that this callback is called when the event has already been processed and stored
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
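To illustrate the callback described above, a minimal module might look like the sketch below; the `register_third_party_rules_callbacks` registration call is assumed from Synapse's module API, and everything else is illustrative.

```python
from typing import Any, Dict


class ExampleModule:
    def __init__(self, config: Dict[str, Any], api: Any):
        # Register the callback; by this point the event is already persisted,
        # so it cannot be rejected from here.
        api.register_third_party_rules_callbacks(on_new_event=self.on_new_event)

    async def on_new_event(self, event: Any, state_events: Any) -> None:
        # state_events is the room state *after* the event and may be partial
        # for rooms that were just joined from a remote server.
        if event.type == "m.room.message":
            print(f"New message in {event.room_id} from {event.sender}")
```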

View File

@@ -12,7 +12,7 @@ This is the main reason people have a poor matrix experience on resource constra
While synapse does have some performance issues with presence [#3971](https://github.com/matrix-org/synapse/issues/3971), the fundamental problem is that this is an easy feature to implement for a centralised service at nearly no overhead, but federation makes it combinatorial [#8055](https://github.com/matrix-org/synapse/issues/8055). There is also a client-side config option which disables the UI and idle tracking [enable_presence_by_hs_url] to blacklist the largest instances but I didn't notice much difference, so I recommend disabling the feature entirely at the server level as well.
[enable_presence_by_hs_url]: https://github.com/element-hq/element-web/blob/v1.7.8/config.sample.json#L45
[enable_presence_by_hs_url]: https://github.com/vector-im/element-web/blob/v1.7.8/config.sample.json#L45
### Joining

View File

@@ -182,7 +182,7 @@ synapse_port_db --sqlite-database homeserver.db.snapshot \
--postgres-config homeserver-postgres.yaml
```
The flag `--curses` displays a coloured curses progress UI. (NOTE: if your terminal is too small the script will error out)
The flag `--curses` displays a coloured curses progress UI.
If the script took a long time to complete, or time has otherwise passed
since the original snapshot was taken, repeat the previous steps with a

View File

@@ -186,25 +186,6 @@ Example configuration, if using a UNIX socket. The configuration lines regarding
backend matrix
server matrix unix@/run/synapse/main_public.sock
```
Example configuration when using a single port for both client and federation traffic.
```
frontend https
bind *:443,[::]:443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]
acl matrix-host hdr(host) -i matrix.example.com matrix.example.com:443
acl matrix-sni ssl_fc_sni matrix.example.com
acl matrix-path path_beg /_matrix
acl matrix-path path_beg /_synapse/client
use_backend matrix if matrix-host matrix-path
use_backend matrix if matrix-sni
backend matrix
server matrix 127.0.0.1:8008
```
[Delegation](delegate.md) example:
```

View File

@@ -26,7 +26,7 @@ for most users.
#### Docker images and Ansible playbooks
There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/element-hq/synapse`](https://ghcr.io/element-hq/synapse)
<https://hub.docker.com/r/vectorim/synapse> or at [`ghcr.io/element-hq/synapse`](https://ghcr.io/element-hq/synapse)
which can be used with the docker-compose file available at
[contrib/docker](https://github.com/element-hq/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
@@ -326,17 +326,6 @@ Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) fo
You may need to install icu, and make the icu binaries and libraries accessible.
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.
If you're struggling to get icu discovered, and see:
```
RuntimeError:
Please install pkg-config on your system or set the ICU_VERSION environment
variable to the version of ICU you have installed.
```
despite it being installed and having your `PATH` updated, you can omit this dependency by
not specifying `--extras all` to `poetry`. If using postgres, you can install Synapse via
`poetry install --extras saml2 --extras oidc --extras postgres --extras opentracing --extras redis --extras sentry`.
ICU is not a hard dependency on getting a working installation.
On ARM-based Macs you may also need to install libjpeg and libpq:
```sh
brew install jpeg libpq

View File

@@ -136,8 +136,8 @@ This will install and start a systemd service called `coturn`.
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
not work with any Matrix client that uses Chromium's WebRTC library. This
currently includes Element Android & iOS; for more details, see their
[respective](https://github.com/element-hq/element-android/issues/1533)
[issues](https://github.com/element-hq/element-ios/issues/2712) as well as the underlying
[respective](https://github.com/vector-im/element-android/issues/1533)
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
Consider using a ZeroSSL certificate for your TURN server as a working alternative.

View File

@@ -137,8 +137,8 @@ must be edited:
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
not work with any Matrix client that uses Chromium's WebRTC library. This
currently includes Element Android & iOS; for more details, see their
[respective](https://github.com/element-hq/element-android/issues/1533)
[issues](https://github.com/element-hq/element-ios/issues/2712) as well as the underlying
[respective](https://github.com/vector-im/element-android/issues/1533)
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
Consider using a ZeroSSL certificate for your TURN server as a working alternative.

View File

@@ -50,13 +50,11 @@ comment these options out and use those specified by the module instead.
A custom mapping provider must specify the following methods:
* `def __init__(self, parsed_config, module_api)`
* `def __init__(self, parsed_config)`
- Arguments:
- `parsed_config` - A configuration object that is the return value of the
`parse_config` method. You should set any configuration options needed by
the module here.
- `module_api` - a `synapse.module_api.ModuleApi` object which provides the
stable API available for extension modules.
* `def parse_config(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
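As a rough skeleton of the provider interface described above (only the method names and signatures come from the docs; the config option and class names are illustrative):

```python
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class ProviderConfig:
    # Illustrative option; real providers define whatever they need.
    display_name_template: str = "{{ user.name }}"


class ExampleMappingProvider:
    def __init__(self, parsed_config: ProviderConfig, module_api: Optional[Any] = None):
        # parsed_config is the return value of parse_config(); module_api is
        # the synapse.module_api.ModuleApi object in the newer signature above.
        self._config = parsed_config
        self._api = module_api

    @staticmethod
    def parse_config(config: dict) -> ProviderConfig:
        return ProviderConfig(
            display_name_template=config.get("display_name_template", "{{ user.name }}")
        )
```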

View File

@@ -88,35 +88,15 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
Generally Synapse database schemas are compatible across multiple versions, but once
a version of Synapse is deployed you may not be able to roll back automatically.
Generally Synapse database schemas are compatible across multiple versions, once
a version of Synapse is deployed you may not be able to rollback automatically.
The following table gives the version ranges and the earliest version they can
be rolled back to. E.g. Synapse versions v1.58.0 through v1.61.1 can be rolled
back safely to v1.57.0, but starting with v1.62.0 it is only safe to roll back to
back safely to v1.57.0, but starting with v1.62.0 it is only safe to rollback to
v1.61.0.
<!-- REPLACE_WITH_SCHEMA_VERSIONS -->
## Upgrading from a very old version
You need to read all of the upgrade notes for each version between your current
version and the latest so that you can update your dependencies, environment,
config files, etc. if necessary. But you do not need to perform an
upgrade to each individual version that was missed.
We do not have a list of which versions must be installed. Instead, we recommend
that you upgrade through each incompatible database schema version, which would
give you the ability to roll back the maximum number of versions should anything
go wrong. See [Rolling back to older versions](#rolling-back-to-older-versions)
above.
Additionally, new versions of Synapse will occasionally run database migrations
and background updates to update the database. Synapse will not start until
database migrations are complete. You should wait until background updates from
each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.100.0
## Minimum supported Rust version

View File

@@ -120,11 +120,6 @@ for file in $source_directory/*; do
done
```
How do I upgrade from a very old version of Synapse to the latest?
---
See [this](../../upgrade.html#upgrading-from-a-very-old-version) section in the
upgrade docs.
Manually resetting passwords
---
Users can reset their password through their client. Alternatively, a server admin

View File

@@ -205,12 +205,3 @@ SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_s
FROM devices
WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
```
## Clear the cache of a remote user's device list
Forces the resync of a remote user's device list - if you have somehow cached a bad state, and the remote server
will not send out a device list update.
```sql
INSERT INTO device_lists_remote_resync
VALUES ('USER_ID', (EXTRACT(epoch FROM NOW()) * 1000)::BIGINT);
```
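A minimal sketch of running the resync query above from Python with `psycopg2`, parameterising the user ID and computing the timestamp client-side; the connection string is a placeholder for your Synapse Postgres database.

```python
import time

import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user host=localhost")  # placeholder
with conn, conn.cursor() as cur:
    # Same insert as the SQL above: user ID plus the current time in milliseconds.
    cur.execute(
        "INSERT INTO device_lists_remote_resync VALUES (%s, %s)",
        ("@someone:remote.example.org", int(time.time() * 1000)),
    )
conn.close()
```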

View File

@@ -3349,9 +3349,6 @@ Options for each entry include:
not included in `scopes`. Set to `userinfo_endpoint` to always use the
userinfo endpoint.
* `additional_authorization_parameters`: String to string dictionary that will be passed as
additional parameters to the authorization grant URL.
* `allow_existing_users`: set to true to allow a user logging in via OIDC to
match a pre-existing account instead of failing. This could be used if
switching from password logins to OIDC. Defaults to false.
@@ -3476,8 +3473,6 @@ oidc_providers:
token_endpoint: "https://accounts.example.com/oauth2/token"
userinfo_endpoint: "https://accounts.example.com/userinfo"
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
additional_authorization_parameters:
acr_values: 2fa
skip_verification: true
enable_registration: true
user_mapping_provider:

View File

@@ -54,7 +54,7 @@ function fetchVersions(dropdown, dropdownMenu) {
return new Promise((resolve, reject) => {
window.addEventListener("load", () => {
fetch("https://api.github.com/repos/element-hq/synapse/git/trees/gh-pages", {
fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
cache: "force-cache",
}).then(res =>
res.json()
@@ -100,30 +100,10 @@ function sortVersions(a, b) {
if (a === 'develop' || a === 'latest') return -1;
if (b === 'develop' || b === 'latest') return 1;
// If any of the versions do not conform to a semantic version string, they
// will be sorted behind a valid version.
const versionA = (a.match(/v(\d+(\.\d+)+)/) || [])[1]?.split('.') ?? '';
const versionB = (b.match(/v(\d+(\.\d+)+)/) || [])[1]?.split('.') ?? '';
const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];
for (let i = 0; i < Math.max(versionA.length, versionB.length); i++) {
if (versionB[i] === undefined) {
return -1;
}
if (versionA[i] === undefined) {
return 1;
}
const partA = parseInt(versionA[i], 10);
const partB = parseInt(versionB[i], 10);
if (partA > partB) {
return -1;
} else if (partB > partA) {
return 1;
}
}
return 0;
return versionB.localeCompare(versionA);
}
/**
@@ -144,4 +124,4 @@ function changeVersion(url, newVersion) {
parsedURL.pathname = pathSegments.join('/');
return parsedURL.href;
}
}
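One side of this hunk implements a numeric, newest-first comparison (cf. the changelog entry above about fixing the documentation version picker's sort order). A rough Python equivalent of that comparison, ignoring the special-casing of `develop` and `latest`:

```python
import re


def version_sort_key(name: str):
    # Names like "v1.104.0" sort newest first; anything that doesn't look
    # like a version sorts behind all real versions.
    m = re.search(r"v(\d+(\.\d+)+)", name)
    if not m:
        return (1, ())
    return (0, tuple(-int(part) for part in m.group(1).split(".")))


print(sorted(["v1.98.0", "v1.104.0", "v1.100.0"], key=version_sort_key))
# ['v1.104.0', 'v1.100.0', 'v1.98.0']
```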

View File

@@ -211,8 +211,6 @@ information.
^/_matrix/federation/v1/make_leave/
^/_matrix/federation/(v1|v2)/send_join/
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/v1/make_knock/
^/_matrix/federation/v1/send_knock/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
@@ -534,13 +532,6 @@ the stream writer for the `presence` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
##### The `push_rules` stream
The following endpoints should be routed directly to the worker configured as
the stream writer for the `push` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
#### Restrict outbound federation traffic to a specific set of workers
The
@@ -638,7 +629,7 @@ worker application type.
You can designate generic worker to sending push notifications to
a [push gateway](https://spec.matrix.org/v1.5/push-gateway-api/) such as
[sygnal](https://github.com/matrix-org/sygnal) and email.
[sygnal](https://github.com/vector-im/sygnal) and email.
This will stop the main process sending push notifications.

poetry.lock (generated, 791 lines changed)

File diff suppressed because it is too large.

View File

@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.104.0"
version = "1.100.0rc3"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -321,7 +321,7 @@ all = [
# This helps prevents merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.3.2"
ruff = "0.1.14"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -372,7 +372,7 @@ optional = true
sphinx = {version = "^6.1", python = "^3.8"}
sphinx-autodoc2 = {version = ">=0.4.2,<0.6.0", python = "^3.8"}
myst-parser = {version = "^1.0.0", python = "^3.8"}
furo = ">=2022.12.7,<2025.0.0"
furo = ">=2022.12.7,<2024.0.0"
[build-system]
@@ -382,7 +382,7 @@ furo = ">=2022.12.7,<2025.0.0"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
requires = ["poetry-core>=1.1.0,<=1.8.1", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"

View File

@@ -660,7 +660,7 @@ def _announce() -> None:
Hi everyone. Synapse {current_version} has just been released.
[notes](https://github.com/element-hq/synapse/releases/tag/{tag_name}) | \
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[docker](https://hub.docker.com/r/vectorim/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
)

View File

@@ -60,7 +60,7 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore
from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
@@ -77,8 +77,10 @@ from synapse.storage.databases.main.media_repository import (
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PusherWorkerStore
from synapse.storage.databases.main.pusher import PusherBackgroundUpdatesStore
from synapse.storage.databases.main.pusher import (
PusherBackgroundUpdatesStore,
PusherWorkerStore,
)
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
@@ -243,6 +245,7 @@ class Store(
AccountDataWorkerStore,
FilteringWorkerStore,
ProfileWorkerStore,
PushRuleStore,
PusherWorkerStore,
PusherBackgroundUpdatesStore,
PresenceBackgroundUpdateStore,
@@ -1037,10 +1040,10 @@ class Porter:
return done, remaining + done
async def _setup_state_group_id_seq(self) -> None:
curr_id: Optional[int] = (
await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)
curr_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)
if not curr_id:
@@ -1129,13 +1132,13 @@ class Porter:
)
async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id: Optional[int] = (
await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)
curr_chain_id: Optional[
int
] = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)
def r(txn: LoggingTransaction) -> None:

View File

@@ -43,6 +43,7 @@ MAIN_TIMELINE: Final = "main"
class Membership:
"""Represents the membership states of a user in a room."""
INVITE: Final = "invite"
@@ -129,8 +130,6 @@ class EventTypes:
Reaction: Final = "m.reaction"
CallInvite: Final = "m.call.invite"
class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request"

View File

@@ -517,6 +517,8 @@ class InvalidCaptchaError(SynapseError):
class LimitExceededError(SynapseError):
"""A client has sent too many requests and is being throttled."""
include_retry_after_header = False
def __init__(
self,
limiter_name: str,
@@ -524,10 +526,9 @@ class LimitExceededError(SynapseError):
retry_after_ms: Optional[int] = None,
errcode: str = Codes.LIMIT_EXCEEDED,
):
# Use HTTP header Retry-After to enable library-assisted retry handling.
headers = (
{"Retry-After": str(math.ceil(retry_after_ms / 1000))}
if retry_after_ms is not None
if self.include_retry_after_header and retry_after_ms is not None
else None
)
super().__init__(code, "Too Many Requests", errcode, headers=headers)

View File

@@ -370,11 +370,9 @@ class RoomVersionCapability:
MSC3244_CAPABILITIES = {
cap.identifier: {
"preferred": (
cap.preferred_version.identifier
if cap.preferred_version is not None
else None
),
"preferred": cap.preferred_version.identifier
if cap.preferred_version is not None
else None,
"support": [
v.identifier
for v in KNOWN_ROOM_VERSIONS.values()

View File

@@ -188,9 +188,9 @@ class SynapseHomeServer(HomeServer):
PasswordResetSubmitTokenResource,
)
resources["/_synapse/client/password_reset/email/submit_token"] = (
PasswordResetSubmitTokenResource(self)
)
resources[
"/_synapse/client/password_reset/email/submit_token"
] = PasswordResetSubmitTokenResource(self)
if name == "consent":
from synapse.rest.consent.consent_resource import ConsentResource

View File

@@ -362,16 +362,16 @@ class ApplicationServiceApi(SimpleHttpClient):
# TODO: Update to stable prefixes once MSC3202 completes FCP merge
if service.msc3202_transaction_extensions:
if one_time_keys_count:
body["org.matrix.msc3202.device_one_time_key_counts"] = (
one_time_keys_count
)
body["org.matrix.msc3202.device_one_time_keys_count"] = (
one_time_keys_count
)
body[
"org.matrix.msc3202.device_one_time_key_counts"
] = one_time_keys_count
body[
"org.matrix.msc3202.device_one_time_keys_count"
] = one_time_keys_count
if unused_fallback_keys:
body["org.matrix.msc3202.device_unused_fallback_key_types"] = (
unused_fallback_keys
)
body[
"org.matrix.msc3202.device_unused_fallback_key_types"
] = unused_fallback_keys
if device_list_summary:
body["org.matrix.msc3202.device_lists"] = {
"changed": list(device_list_summary.changed),

View File

@@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Optional
import attr
import attr.validators
from synapse.api.errors import LimitExceededError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
from synapse.config._base import Config, RootConfig
@@ -414,6 +415,14 @@ class ExperimentalConfig(Config):
"msc4010_push_rules_account_data", False
)
# MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
#
# This is a bit hacky, but the most reasonable way to *always* include the
# headers.
LimitExceededError.include_retry_after_header = experimental.get(
"msc4041_enabled", False
)
self.msc4028_push_encrypted_events = experimental.get(
"msc4028_push_encrypted_events", False
)

View File

@@ -342,9 +342,6 @@ def _parse_oidc_config_dict(
user_mapping_provider_config=user_mapping_provider_config,
attribute_requirements=attribute_requirements,
enable_registration=oidc_config.get("enable_registration", True),
additional_authorization_parameters=oidc_config.get(
"additional_authorization_parameters", {}
),
)
@@ -447,6 +444,3 @@ class OidcProviderConfig:
# Whether automatic registrations are enabled in the OIDC flow. Defaults to True
enable_registration: bool
# Additional parameters that will be passed to the authorization grant URL
additional_authorization_parameters: Mapping[str, str]

View File

@@ -171,9 +171,9 @@ class RegistrationConfig(Config):
refreshable_access_token_lifetime = self.parse_duration(
refreshable_access_token_lifetime
)
self.refreshable_access_token_lifetime: Optional[int] = (
refreshable_access_token_lifetime
)
self.refreshable_access_token_lifetime: Optional[
int
] = refreshable_access_token_lifetime
if (
self.session_lifetime is not None
@@ -237,14 +237,6 @@ class RegistrationConfig(Config):
self.inhibit_user_in_use_error = config.get("inhibit_user_in_use_error", False)
# List of user IDs not to send out device list updates for when they
# register new devices. This is useful to handle bot accounts.
#
# Note: This will still send out device list updates if the device is
# later updated, e.g. end to end keys are added.
dont_notify_new_devices_for = config.get("dont_notify_new_devices_for", [])
self.dont_notify_new_devices_for = frozenset(dont_notify_new_devices_for)
def generate_config_section(
self, generate_secrets: bool = False, **kwargs: Any
) -> str:

View File

@@ -199,9 +199,9 @@ class ContentRepositoryConfig(Config):
provider_config["module"] == "file_system"
or provider_config["module"] == "synapse.rest.media.v1.storage_provider"
):
provider_config["module"] = (
"synapse.media.storage_provider.FileStorageProviderBackend"
)
provider_config[
"module"
] = "synapse.media.storage_provider.FileStorageProviderBackend"
provider_class, parsed_config = load_module(
provider_config, ("media_storage_providers", "<item %i>" % i)

View File

@@ -156,8 +156,6 @@ class WriterLocations:
can only be a single instance.
presence: The instances that write to the presence stream. Currently
can only be a single instance.
push_rules: The instances that write to the push stream. Currently
can only be a single instance.
"""
events: List[str] = attr.ib(
@@ -184,10 +182,6 @@ class WriterLocations:
default=["master"],
converter=_instance_to_list_converter,
)
push_rules: List[str] = attr.ib(
default=["master"],
converter=_instance_to_list_converter,
)
@attr.s(auto_attribs=True)
@@ -347,7 +341,6 @@ class WorkerConfig(Config):
"account_data",
"receipts",
"presence",
"push_rules",
):
instances = _instance_to_list_converter(getattr(self.writers, stream))
for instance in instances:
@@ -385,11 +378,6 @@ class WorkerConfig(Config):
"Must only specify one instance to handle `presence` messages."
)
if len(self.writers.push_rules) != 1:
raise ConfigError(
"Must only specify one instance to handle `push` messages."
)
self.events_shard_config = RoutableShardedWorkerHandlingConfig(
self.writers.events
)

View File

@@ -839,12 +839,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Map from server_name -> key_id -> FetchKeyResult
"""
# We only need to do one request per server.
servers_to_fetch = {k.server_name for k in keys_to_fetch}
results = {}
async def get_keys(server_name: str) -> None:
async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name
try:
keys = await self.get_server_verify_keys_v2_direct(server_name)
results[server_name] = keys
@@ -853,7 +852,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
except Exception:
logger.exception("Error getting keys from %s", server_name)
await yieldable_gather_results(get_keys, servers_to_fetch)
await yieldable_gather_results(get_keys, keys_to_fetch)
return results
async def get_server_verify_keys_v2_direct(

View File

@@ -23,20 +23,7 @@
import collections.abc
import logging
import typing
from typing import (
Any,
ChainMap,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast,
)
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -88,7 +75,8 @@ class _EventSourceStore(Protocol):
redact_behaviour: EventRedactBehaviour,
get_prev_content: bool = False,
allow_rejected: bool = False,
) -> Dict[str, "EventBase"]: ...
) -> Dict[str, "EventBase"]:
...
def validate_event_for_room_version(event: "EventBase") -> None:
@@ -187,22 +175,12 @@ async def check_state_independent_auth_rules(
return
# 2. Reject if event has auth_events that: ...
auth_events: ChainMap[str, EventBase] = ChainMap()
if batched_auth_events:
# batched_auth_events can become very large. To avoid repeatedly copying it, which
# would significantly impact performance, we use a ChainMap.
# batched_auth_events must be cast to MutableMapping because .new_child() requires
# this type. This casting is safe as the mapping is never mutated.
auth_events = auth_events.new_child(
cast(MutableMapping[str, "EventBase"], batched_auth_events)
)
needed_auth_event_ids = [
event_id
for event_id in event.auth_event_ids()
if event_id not in batched_auth_events
]
# Copy the batched auth events to avoid mutating them.
auth_events = dict(batched_auth_events)
needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys()
if needed_auth_event_ids:
auth_events = auth_events.new_child(
auth_events.update(
await store.get_events(
needed_auth_event_ids,
redact_behaviour=EventRedactBehaviour.as_is,
@@ -210,12 +188,10 @@ async def check_state_independent_auth_rules(
)
)
else:
auth_events = auth_events.new_child(
await store.get_events(
event.auth_event_ids(),
redact_behaviour=EventRedactBehaviour.as_is,
allow_rejected=True,
)
auth_events = await store.get_events(
event.auth_event_ids(),
redact_behaviour=EventRedactBehaviour.as_is,
allow_rejected=True,
)
room_id = event.room_id
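One side of this hunk layers auth events with `collections.ChainMap` instead of copying the potentially large `batched_auth_events` mapping. A tiny standalone illustration of that technique:

```python
from collections import ChainMap

batched = {"$event_a": "auth event A"}  # stands in for batched_auth_events
fetched = {"$event_b": "auth event B"}  # stands in for events fetched from the store

# new_child() layers an extra lookup on top without copying either mapping.
auth_events = ChainMap().new_child(batched).new_child(fetched)
assert auth_events["$event_a"] == "auth event A"
assert auth_events["$event_b"] == "auth event B"
```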

View File

@@ -93,14 +93,16 @@ class DictProperty(Generic[T]):
self,
instance: Literal[None],
owner: Optional[Type[_DictPropertyInstance]] = None,
) -> "DictProperty": ...
) -> "DictProperty":
...
@overload
def __get__(
self,
instance: _DictPropertyInstance,
owner: Optional[Type[_DictPropertyInstance]] = None,
) -> T: ...
) -> T:
...
def __get__(
self,
@@ -159,14 +161,16 @@ class DefaultDictProperty(DictProperty, Generic[T]):
self,
instance: Literal[None],
owner: Optional[Type[_DictPropertyInstance]] = None,
) -> "DefaultDictProperty": ...
) -> "DefaultDictProperty":
...
@overload
def __get__(
self,
instance: _DictPropertyInstance,
owner: Optional[Type[_DictPropertyInstance]] = None,
) -> T: ...
) -> T:
...
def __get__(
self,

View File

@@ -612,9 +612,9 @@ class EventClientSerializer:
serialized_aggregations = {}
if event_aggregations.references:
serialized_aggregations[RelationTypes.REFERENCE] = (
event_aggregations.references
)
serialized_aggregations[
RelationTypes.REFERENCE
] = event_aggregations.references
if event_aggregations.replace:
# Include information about it in the relations dict.

View File

@@ -169,9 +169,9 @@ class FederationServer(FederationBase):
# We cache responses to state queries, as they take a while and often
# come in waves.
self._state_resp_cache: ResponseCache[Tuple[str, Optional[str]]] = (
ResponseCache(hs.get_clock(), "state_resp", timeout_ms=30000)
)
self._state_resp_cache: ResponseCache[
Tuple[str, Optional[str]]
] = ResponseCache(hs.get_clock(), "state_resp", timeout_ms=30000)
self._state_ids_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "state_ids_resp", timeout_ms=30000
)

View File

@@ -88,9 +88,9 @@ class FederationRemoteSendQueue(AbstractFederationSender):
# Stores the destinations we need to explicitly send presence to about a
# given user.
# Stream position -> (user_id, destinations)
self.presence_destinations: SortedDict[int, Tuple[str, Iterable[str]]] = (
SortedDict()
)
self.presence_destinations: SortedDict[
int, Tuple[str, Iterable[str]]
] = SortedDict()
# (destination, key) -> EDU
self.keyed_edu: Dict[Tuple[str, tuple], Edu] = {}

View File

@@ -192,9 +192,10 @@ sent_pdus_destination_dist_total = Counter(
)
# Time (in s) to wait before trying to wake up destinations that have
# catch-up outstanding.
# catch-up outstanding. This will also be the delay applied at startup
# before trying the same.
# Please note that rate limiting still applies, so while the loop is
# executed every X seconds the destinations may not be woken up because
# executed every X seconds the destinations may not be wake up because
# they are being rate limited following previous attempt failures.
WAKEUP_RETRY_PERIOD_SEC = 60
@@ -427,17 +428,18 @@ class FederationSender(AbstractFederationSender):
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._external_cache = hs.get_external_cache()
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call_now(
self.clock.looping_call(
run_as_background_process,
WAKEUP_RETRY_PERIOD_SEC * 1000.0,
"wake_destinations_needing_catchup",
self._wake_destinations_needing_catchup,
)
self._external_cache = hs.get_external_cache()
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
"""Get or create a PerDestinationQueue for the given destination

View File

@@ -118,10 +118,10 @@ class AccountHandler:
}
if self._use_account_validity_in_account_status:
status["org.matrix.expired"] = (
await self._account_validity_handler.is_user_expired(
user_id.to_string()
)
status[
"org.matrix.expired"
] = await self._account_validity_handler.is_user_expired(
user_id.to_string()
)
return status

View File

@@ -2185,7 +2185,7 @@ class PasswordAuthProvider:
# result is always the right type, but as it is 3rd party code it might not be
if not isinstance(result, tuple) or len(result) != 2:
logger.warning( # type: ignore[unreachable]
logger.warning(
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,
@@ -2248,7 +2248,7 @@ class PasswordAuthProvider:
# result is always the right type, but as it is 3rd party code it might not be
if not isinstance(result, tuple) or len(result) != 2:
logger.warning( # type: ignore[unreachable]
logger.warning(
"Wrong type returned by module API callback %s: %s, expected"
" Optional[Tuple[str, Optional[Callable]]]",
callback,

View File

@@ -18,11 +18,9 @@
# [This file includes modifications made by New Vector Limited]
#
#
import itertools
import logging
from typing import TYPE_CHECKING, Optional
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -170,9 +168,9 @@ class DeactivateAccountHandler:
# parts users from rooms (if it isn't already running)
self._start_user_parting()
# Reject all pending invites and knocks for the user, so that the
# user doesn't show up in the "invited" section of rooms' members list.
await self._reject_pending_invites_and_knocks_for_user(user_id)
# Reject all pending invites for the user, so that the user doesn't show up in the
# "invited" section of rooms' members list.
await self._reject_pending_invites_for_user(user_id)
# Remove all information on the user from the account_validity table.
if self._account_validity_enabled:
@@ -196,37 +194,34 @@ class DeactivateAccountHandler:
return identity_server_supports_unbinding
async def _reject_pending_invites_and_knocks_for_user(self, user_id: str) -> None:
"""Reject pending invites and knocks addressed to a given user ID.
async def _reject_pending_invites_for_user(self, user_id: str) -> None:
"""Reject pending invites addressed to a given user ID.
Args:
user_id: The user ID to reject pending invites and knocks for.
user_id: The user ID to reject pending invites for.
"""
user = UserID.from_string(user_id)
pending_invites = await self.store.get_invited_rooms_for_local_user(user_id)
pending_knocks = await self.store.get_knocked_at_rooms_for_local_user(user_id)
for room in itertools.chain(pending_invites, pending_knocks):
for room in pending_invites:
try:
await self._room_member_handler.update_membership(
create_requester(user, authenticated_entity=self._server_name),
user,
room.room_id,
Membership.LEAVE,
"leave",
ratelimit=False,
require_consent=False,
)
logger.info(
"Rejected %r for deactivated user %r in room %r",
room.membership,
"Rejected invite for deactivated user %r in room %r",
user_id,
room.room_id,
)
except Exception:
logger.exception(
"Failed to reject %r for user %r in room %r:"
"Failed to reject invite for user %r in room %r:"
" ignoring and continuing",
room.membership,
user_id,
room.room_id,
)
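One side of the hunk above walks pending invites and knocks in a single pass. itertools.chain does exactly that, lazily and without building a combined list; a toy illustration with made-up room IDs:

import itertools

pending_invites = ["!abc:example.org"]
pending_knocks = ["!def:example.org"]

# chain() exhausts the first iterable, then the second, copying neither.
for room_id in itertools.chain(pending_invites, pending_knocks):
    print("rejecting pending membership in", room_id)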

View File

@@ -429,10 +429,6 @@ class DeviceHandler(DeviceWorkerHandler):
self._storage_controllers = hs.get_storage_controllers()
self.db_pool = hs.get_datastores().main.db_pool
self._dont_notify_new_devices_for = (
hs.config.registration.dont_notify_new_devices_for
)
self.device_list_updater = DeviceListUpdater(hs, self)
federation_registry = hs.get_federation_registry()
@@ -509,9 +505,6 @@ class DeviceHandler(DeviceWorkerHandler):
self._check_device_name_length(initial_device_display_name)
# Check if we should send out device lists updates for this new device.
notify = user_id not in self._dont_notify_new_devices_for
if device_id is not None:
new_device = await self.store.store_device(
user_id=user_id,
@@ -521,8 +514,7 @@ class DeviceHandler(DeviceWorkerHandler):
auth_provider_session_id=auth_provider_session_id,
)
if new_device:
if notify:
await self.notify_device_update(user_id, [device_id])
await self.notify_device_update(user_id, [device_id])
return device_id
# if the device id is not specified, we'll autogen one, but loop a few
@@ -538,8 +530,7 @@ class DeviceHandler(DeviceWorkerHandler):
auth_provider_session_id=auth_provider_session_id,
)
if new_device:
if notify:
await self.notify_device_update(user_id, [new_device_id])
await self.notify_device_update(user_id, [new_device_id])
return new_device_id
attempts += 1

View File

@@ -265,9 +265,9 @@ class DirectoryHandler:
async def get_association(self, room_alias: RoomAlias) -> JsonDict:
room_id = None
if self.hs.is_mine(room_alias):
result: Optional[RoomAliasMapping] = (
await self.get_association_from_room_alias(room_alias)
)
result: Optional[
RoomAliasMapping
] = await self.get_association_from_room_alias(room_alias)
if result:
room_id = result.room_id

View File

@@ -1001,11 +1001,11 @@ class FederationHandler:
)
if include_auth_user_id:
event_content[EventContentFields.AUTHORISING_USER] = (
await self._event_auth_handler.get_user_which_could_invite(
room_id,
state_ids,
)
event_content[
EventContentFields.AUTHORISING_USER
] = await self._event_auth_handler.get_user_which_could_invite(
room_id,
state_ids,
)
builder = self.event_builder_factory.for_room_version(

View File

@@ -1367,9 +1367,9 @@ class FederationEventHandler:
)
if remote_event.is_state() and remote_event.rejected_reason is None:
state_map[(remote_event.type, remote_event.state_key)] = (
remote_event.event_id
)
state_map[
(remote_event.type, remote_event.state_key)
] = remote_event.event_id
return state_map
@@ -1757,25 +1757,17 @@ class FederationEventHandler:
events_and_contexts_to_persist.append((event, context))
for i, event in enumerate(sorted_auth_events):
for event in sorted_auth_events:
await prep(event)
# The above function is typically not async, and so won't yield to
# the reactor. For large rooms let's yield to the reactor
# occasionally to ensure we don't block other work.
if (i + 1) % 1000 == 0:
await self._clock.sleep(0)
# Also persist the new event in batches for similar reasons as above.
for batch in batch_iter(events_and_contexts_to_persist, 1000):
await self.persist_events_and_notify(
room_id,
batch,
# Mark these events as backfilled as they're historic events that will
# eventually be backfilled. For example, missing events we fetch
# during backfill should be marked as backfilled as well.
backfilled=True,
)
await self.persist_events_and_notify(
room_id,
events_and_contexts_to_persist,
# Mark these events backfilled as they're historic events that will
# eventually be backfilled. For example, missing events we fetch
# during backfill should be marked as backfilled as well.
backfilled=True,
)
@trace
async def _check_event_auth(
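One side of the hunk above chunks the work: it yields to the reactor every 1000 prepared events and persists in 1000-event batches. A self-contained asyncio sketch of that chunking pattern; batch_iter here is a local helper written for the example rather than Synapse's utility:

import asyncio
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar("T")

def batch_iter(items: Iterable[T], size: int) -> Iterator[List[T]]:
    # Yield successive chunks of at most `size` items.
    batch: List[T] = []
    for item in items:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch

async def persist_batch(batch: List[int]) -> None:
    await asyncio.sleep(0)  # placeholder for an expensive persistence step

async def process_all(events: List[int]) -> None:
    for i, event in enumerate(events):
        _ = event * 2  # cheap synchronous prep work per event
        if (i + 1) % 1000 == 0:
            await asyncio.sleep(0)  # yield to the event loop now and then
    for chunk in batch_iter(events, 1000):
        await persist_batch(chunk)

asyncio.run(process_all(list(range(2500))))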

View File

@@ -34,7 +34,6 @@ from synapse.api.constants import (
EventTypes,
GuestAccess,
HistoryVisibility,
JoinRules,
Membership,
RelationTypes,
UserTypes,
@@ -1326,18 +1325,6 @@ class EventCreationHandler:
self.validator.validate_new(event, self.config)
await self._validate_event_relation(event)
if event.type == EventTypes.CallInvite:
room_id = event.room_id
room_info = await self.store.get_room_with_stats(room_id)
assert room_info is not None
if room_info.join_rules == JoinRules.PUBLIC:
raise SynapseError(
403,
"Call invites are not allowed in public rooms.",
Codes.FORBIDDEN,
)
logger.debug("Created event %s", event.event_id)
return event, context
@@ -1667,9 +1654,9 @@ class EventCreationHandler:
expiry_ms=60 * 60 * 1000,
)
self._external_cache_joined_hosts_updates[state_entry.state_group] = (
None
)
self._external_cache_joined_hosts_updates[
state_entry.state_group
] = None
async def _validate_canonical_alias(
self,

View File

@@ -65,7 +65,6 @@ from synapse.http.server import finish_request
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.module_api import ModuleApi
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
from synapse.util import Clock, json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
@@ -422,19 +421,9 @@ class OidcProvider:
# from the IdP's jwks_uri, if required.
self._jwks = RetryOnExceptionCachedCall(self._load_jwks)
user_mapping_provider_init_method = (
provider.user_mapping_provider_class.__init__
self._user_mapping_provider = provider.user_mapping_provider_class(
provider.user_mapping_provider_config
)
if len(inspect.signature(user_mapping_provider_init_method).parameters) == 3:
self._user_mapping_provider = provider.user_mapping_provider_class(
provider.user_mapping_provider_config,
ModuleApi(hs, hs.get_auth_handler()),
)
else:
self._user_mapping_provider = provider.user_mapping_provider_class(
provider.user_mapping_provider_config,
)
self._skip_verification = provider.skip_verification
self._allow_existing_users = provider.allow_existing_users
@@ -453,10 +442,6 @@ class OidcProvider:
# optional brand identifier for this auth provider
self.idp_brand = provider.idp_brand
self.additional_authorization_parameters = (
provider.additional_authorization_parameters
)
self._sso_handler = hs.get_sso_handler()
self._device_handler = hs.get_device_handler()
@@ -833,38 +818,14 @@ class OidcProvider:
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
resp = await self._http_client.request(
"GET",
resp = await self._http_client.get_json(
metadata["userinfo_endpoint"],
headers=Headers(
{"Authorization": ["Bearer {}".format(token["access_token"])]}
),
headers={"Authorization": ["Bearer {}".format(token["access_token"])]},
)
body = await readBody(resp)
logger.debug("Retrieved user info from userinfo endpoint: %r", resp)
content_type_headers = resp.headers.getRawHeaders("Content-Type")
assert content_type_headers
# We use `startswith` because the header value can contain the `charset` parameter
# even if it is useless, and Twisted doesn't take care of that for us.
if content_type_headers[0].startswith("application/jwt"):
alg_values = metadata.get(
"id_token_signing_alg_values_supported", ["RS256"]
)
jwt = JsonWebToken(alg_values)
jwk_set = await self.load_jwks()
try:
decoded_resp = jwt.decode(body, key=jwk_set)
except ValueError:
logger.info("Reloading JWKS after decode error")
jwk_set = await self.load_jwks(force=True) # try reloading the jwks
decoded_resp = jwt.decode(body, key=jwk_set)
else:
decoded_resp = json_decoder.decode(body.decode("utf-8"))
logger.debug("Retrieved user info from userinfo endpoint: %r", decoded_resp)
return UserInfo(decoded_resp)
return UserInfo(resp)
async def _verify_jwt(
self,
@@ -1010,21 +971,17 @@ class OidcProvider:
metadata = await self.load_metadata()
additional_authorization_parameters = dict(
self.additional_authorization_parameters
)
# Automatically enable PKCE if it is supported.
extra_grant_values = {}
if metadata.get("code_challenge_methods_supported"):
code_verifier = generate_token(48)
# Note that we verified the server supports S256 earlier (in
# OidcProvider._validate_metadata).
additional_authorization_parameters.update(
{
"code_challenge_method": "S256",
"code_challenge": create_s256_code_challenge(code_verifier),
}
)
extra_grant_values = {
"code_challenge_method": "S256",
"code_challenge": create_s256_code_challenge(code_verifier),
}
cookie = self._macaroon_generaton.generate_oidc_session_token(
state=state,
@@ -1063,7 +1020,7 @@ class OidcProvider:
scope=self._scopes,
state=state,
nonce=nonce,
**additional_authorization_parameters,
**extra_grant_values,
)
async def handle_oidc_callback(
@@ -1626,7 +1583,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
This is the default mapping provider.
"""
def __init__(self, config: JinjaOidcMappingConfig, module_api: ModuleApi):
def __init__(self, config: JinjaOidcMappingConfig):
self._config = config
@staticmethod
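One side of the hunk above enables PKCE by sending a code_challenge built with authlib's create_s256_code_challenge. The S256 scheme itself (RFC 7636) is just base64url(SHA-256(verifier)) with the padding stripped; a standalone sketch:

import base64
import hashlib
import secrets
from typing import Tuple

def make_pkce_pair() -> Tuple[str, str]:
    verifier = secrets.token_urlsafe(48)
    digest = hashlib.sha256(verifier.encode("ascii")).digest()
    # base64url, minus the trailing "=" padding, per RFC 7636.
    challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return verifier, challenge

verifier, challenge = make_pkce_pair()
# The challenge (with code_challenge_method="S256") goes in the authorization
# request; the verifier is presented later when redeeming the code.
print(challenge)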

View File

@@ -493,9 +493,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
# The number of ongoing syncs on this process, by (user ID, device ID).
# Empty if _presence_enabled is false.
self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
{}
)
self._user_device_to_num_current_syncs: Dict[
Tuple[str, Optional[str]], int
] = {}
self.notifier = hs.get_notifier()
self.instance_id = hs.get_instance_id()
@@ -818,9 +818,9 @@ class PresenceHandler(BasePresenceHandler):
# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
{}
)
self._user_device_to_num_current_syncs: Dict[
Tuple[str, Optional[str]], int
] = {}
# Keeps track of the number of *ongoing* syncs on other processes.
#

View File

@@ -320,9 +320,9 @@ class ProfileHandler:
server_name = host
if self._is_mine_server_name(server_name):
media_info: Optional[Union[LocalMedia, RemoteMedia]] = (
await self.store.get_local_media(media_id)
)
media_info: Optional[
Union[LocalMedia, RemoteMedia]
] = await self.store.get_local_media(media_id)
else:
media_info = await self.store.get_cached_remote_media(server_name, media_id)

View File

@@ -55,12 +55,12 @@ class ReadMarkerHandler:
should_update = True
# Get event ordering, this also ensures we know about the event
event_ordering = await self.store.get_event_ordering(event_id, room_id)
event_ordering = await self.store.get_event_ordering(event_id)
if existing_read_marker:
try:
old_event_ordering = await self.store.get_event_ordering(
existing_read_marker["event_id"], room_id
existing_read_marker["event_id"]
)
except SynapseError:
# Old event no longer exists, assume new is ahead. This may

View File

@@ -188,13 +188,13 @@ class RelationsHandler:
if include_original_event:
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
return_value["original_event"] = (
await self._event_serializer.serialize_event(
event,
now,
bundle_aggregations=None,
config=serialize_options,
)
return_value[
"original_event"
] = await self._event_serializer.serialize_event(
event,
now,
bundle_aggregations=None,
config=serialize_options,
)
if next_token:

View File

@@ -151,7 +151,7 @@ class RoomCreationHandler:
"history_visibility": HistoryVisibility.SHARED,
"original_invitees_have_ops": False,
"guest_can_join": False,
"power_level_content_override": {EventTypes.CallInvite: 50},
"power_level_content_override": {},
},
}
@@ -538,10 +538,10 @@ class RoomCreationHandler:
# deep-copy the power-levels event before we start modifying it
# note that if frozen_dicts are enabled, `power_levels` will be a frozen
# dict so we can't just copy.deepcopy it.
initial_state[(EventTypes.PowerLevels, "")] = power_levels = (
copy_and_fixup_power_levels_contents(
initial_state[(EventTypes.PowerLevels, "")]
)
initial_state[
(EventTypes.PowerLevels, "")
] = power_levels = copy_and_fixup_power_levels_contents(
initial_state[(EventTypes.PowerLevels, "")]
)
# Resolve the minimum power level required to send any state event
@@ -1362,11 +1362,9 @@ class RoomCreationHandler:
visibility = room_config.get("visibility", "private")
preset_name = room_config.get(
"preset",
(
RoomCreationPreset.PRIVATE_CHAT
if visibility == "private"
else RoomCreationPreset.PUBLIC_CHAT
),
RoomCreationPreset.PRIVATE_CHAT
if visibility == "private"
else RoomCreationPreset.PUBLIC_CHAT,
)
try:
preset_config = self._presets_dict[preset_name]

View File

@@ -51,7 +51,6 @@ from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.push import ReplicationCopyPusherRestServlet
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
JsonDict,
@@ -182,12 +181,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
hs.config.server.forgotten_room_retention_period
)
self._is_push_writer = (
hs.get_instance_name() in hs.config.worker.writers.push_rules
)
self._push_writer = hs.config.worker.writers.push_rules[0]
self._copy_push_client = ReplicationCopyPusherRestServlet.make_client(hs)
def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
"""Notify the rate limiter that a room join has occurred.
@@ -1243,11 +1236,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[EventContentFields.AUTHORISING_USER] = (
await self.event_auth_handler.get_user_which_could_invite(
room_id,
state_before_join,
)
content[
EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
state_before_join,
)
return False, []
@@ -1308,17 +1301,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
old_room_id, new_room_id, user_id
)
# Copy over push rules
if self._is_push_writer:
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
else:
await self._copy_push_client(
instance_name=self._push_writer,
user_id=user_id,
old_room_id=old_room_id,
new_room_id=new_room_id,
)
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
except Exception:
logger.exception(
"Error copying tags and/or push rules from rooms %s to %s for user %s. "

View File

@@ -150,7 +150,7 @@ class UserAttributes:
display_name: Optional[str] = None
picture: Optional[str] = None
# mypy thinks these are incompatible for some reason.
emails: StrCollection = attr.Factory(list)
emails: StrCollection = attr.Factory(list) # type: ignore[assignment]
@attr.s(slots=True, auto_attribs=True)

View File

@@ -41,7 +41,6 @@ from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
JoinRules,
Membership,
)
from synapse.api.filtering import FilterCollection
@@ -676,22 +675,13 @@ class SyncHandler:
)
)
filtered_recents = await filter_events_for_client(
loaded_recents = await filter_events_for_client(
self._storage_controllers,
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
)
loaded_recents = []
for event in filtered_recents:
if event.type == EventTypes.CallInvite:
room_info = await self.store.get_room_with_stats(event.room_id)
assert room_info is not None
if room_info.join_rules == JoinRules.PUBLIC:
continue
loaded_recents.append(event)
log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})
loaded_recents.extend(recents)
@@ -953,7 +943,7 @@ class SyncHandler:
batch: TimelineBatch,
sync_config: SyncConfig,
since_token: Optional[StreamToken],
end_token: StreamToken,
now_token: StreamToken,
full_state: bool,
) -> MutableStateMap[EventBase]:
"""Works out the difference in state between the end of the previous sync and
@@ -964,9 +954,7 @@ class SyncHandler:
batch: The timeline batch for the room that will be sent to the user.
sync_config:
since_token: Token of the end of the previous batch. May be `None`.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
the point just after their leave event.
now_token: Token of the end of the current batch.
full_state: Whether to force returning the full state.
`lazy_load_members` still applies when `full_state` is `True`.
@@ -1026,6 +1014,30 @@ class SyncHandler:
if event.is_state():
timeline_state[(event.type, event.state_key)] = event.event_id
if full_state:
# always make sure we LL ourselves so we know we're in the room
# (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
# We only need apply this on full state syncs given we disabled
# LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840.
# We don't insert ourselves into `members_to_fetch`, because in some
# rare cases (an empty event batch with a now_token after the user's
# leave in a partial state room which another local user has
# joined), the room state will be missing our membership and there
# is no guarantee that our membership will be in the auth events of
# timeline events when the room is partial stated.
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch.union((sync_config.user.to_string(),))
)
else:
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch
)
# We are happy to use partial state to compute the `/sync` response.
# Since partial state may not include the lazy-loaded memberships we
# require, we fix up the state response afterwards with memberships from
# auth events.
await_full_state = False
else:
timeline_state = {
(event.type, event.state_key): event.event_id
@@ -1033,6 +1045,9 @@ class SyncHandler:
if event.is_state()
}
state_filter = StateFilter.all()
await_full_state = True
# Now calculate the state to return in the sync response for the room.
# This is more or less the change in state between the end of the previous
# sync's timeline and the start of the current sync's timeline.
@@ -1042,29 +1057,132 @@ class SyncHandler:
# whether the room is partial stated *before* fetching it.
is_partial_state_room = await self.store.is_partial_state_room(room_id)
if full_state:
state_ids = await self._compute_state_delta_for_full_sync(
room_id,
sync_config.user,
batch,
end_token,
members_to_fetch,
timeline_state,
if batch:
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[-1].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
else:
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=now_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
state_at_timeline_start = state_at_timeline_end
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
timeline_end=state_at_timeline_end,
previous_timeline_end={},
lazy_load_members=lazy_load_members,
)
else:
elif batch.limited:
if batch:
state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
else:
# We can get here if the user has ignored the senders of all
# the recent events.
state_at_timeline_start = await self.get_state_at(
room_id,
stream_position=now_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
# for now, we disable LL for gappy syncs - see
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
# N.B. this slows down incr syncs as we are now processing way
# more state in the server than if we were LLing.
#
# We still have to filter timeline_start to LL entries (above) in order
# for _calculate_state's LL logic to work, as we have to include LL
# members for timeline senders in case they weren't loaded in the initial
# sync. We do this (counterintuitively) by filtering timeline_start
# members to just be ones which were timeline senders, which then ensures
# all of the rest get included in the state block (if we need to know
# about them).
state_filter = StateFilter.all()
# If this is an initial sync then full_state should be set, and
# that case is handled above. We assert here to ensure that this
# is indeed the case.
assert since_token is not None
state_ids = await self._compute_state_delta_for_incremental_sync(
state_at_previous_sync = await self.get_state_at(
room_id,
batch,
since_token,
end_token,
members_to_fetch,
timeline_state,
stream_position=since_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
if batch:
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[-1].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
else:
# We can get here if the user has ignored the senders of all
# the recent events.
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=now_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
timeline_end=state_at_timeline_end,
previous_timeline_end=state_at_previous_sync,
# we have to include LL members in case LL initial sync missed them
lazy_load_members=lazy_load_members,
)
else:
state_ids = {}
if lazy_load_members:
if members_to_fetch and batch.events:
# We're returning an incremental sync, with no
# "gap" since the previous sync, so normally there would be
# no state to return.
# But we're lazy-loading, so the client might need some more
# member events to understand the events in this timeline.
# So we fish out all the member events corresponding to the
# timeline here, and then dedupe any redundant ones below.
state_ids = await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
# we only want members!
state_filter=StateFilter.from_types(
(EventTypes.Member, member)
for member in members_to_fetch
),
await_full_state=False,
)
# If we only have partial state for the room, `state_ids` may be missing the
# memberships we wanted. We attempt to find some by digging through the auth
# events of timeline events.
@@ -1127,195 +1245,6 @@ class SyncHandler:
if e.type != EventTypes.Aliases # until MSC2261 or alternative solution
}
async def _compute_state_delta_for_full_sync(
self,
room_id: str,
syncing_user: UserID,
batch: TimelineBatch,
end_token: StreamToken,
members_to_fetch: Optional[Set[str]],
timeline_state: StateMap[str],
) -> StateMap[str]:
"""Calculate the state events to be included in a full sync response.
As with `_compute_state_delta_for_incremental_sync`, the result will include
the membership events for the senders of each event in `members_to_fetch`.
Args:
room_id: The room we are calculating for.
syncing_user: The user that is calling `/sync`.
batch: The timeline batch for the room that will be sent to the user.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
the point just after their leave event.
members_to_fetch: If lazy-loading is enabled, the memberships needed for
events in the timeline.
timeline_state: The contribution to the room state from state events in
`batch`. Only contains the last event for any given state key.
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading of membership events is enabled.
#
# Always make sure we load our own membership event so we know if
# we're in the room, to fix https://github.com/vector-im/riot-web/issues/7209.
#
# We only need apply this on full state syncs given we disabled
# LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840.
#
# We don't insert ourselves into `members_to_fetch`, because in some
# rare cases (an empty event batch with a now_token after the user's
# leave in a partial state room which another local user has
# joined), the room state will be missing our membership and there
# is no guarantee that our membership will be in the auth events of
# timeline events when the room is partial stated.
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch.union((syncing_user.to_string(),))
)
# We are happy to use partial state to compute the `/sync` response.
# Since partial state may not include the lazy-loaded memberships we
# require, we fix up the state response afterwards with memberships from
# auth events.
await_full_state = False
lazy_load_members = True
else:
state_filter = StateFilter.all()
await_full_state = True
lazy_load_members = False
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
if batch:
# Strictly speaking, this returns the state *after* the first event in the
# timeline, but that is good enough here.
state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
else:
state_at_timeline_start = state_at_timeline_end
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
timeline_end=state_at_timeline_end,
previous_timeline_end={},
lazy_load_members=lazy_load_members,
)
return state_ids
async def _compute_state_delta_for_incremental_sync(
self,
room_id: str,
batch: TimelineBatch,
since_token: StreamToken,
end_token: StreamToken,
members_to_fetch: Optional[Set[str]],
timeline_state: StateMap[str],
) -> StateMap[str]:
"""Calculate the state events to be included in an incremental sync response.
If lazy-loading of membership events is enabled (as indicated by
`members_to_fetch` being not-`None`), the result will include the membership
events for each member in `members_to_fetch`. The caller
(`compute_state_delta`) is responsible for keeping track of which membership
events we have already sent to the client, and hence ripping them out.
Args:
room_id: The room we are calculating for.
batch: The timeline batch for the room that will be sent to the user.
since_token: Token of the end of the previous batch.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
the point just after their leave event.
members_to_fetch: If lazy-loading is enabled, the memberships needed for
events in the timeline. Otherwise, `None`.
timeline_state: The contribution to the room state from state events in
`batch`. Only contains the last event for any given state key.
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading is enabled. Only return the state that is needed.
state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
await_full_state = False
lazy_load_members = True
else:
state_filter = StateFilter.all()
await_full_state = True
lazy_load_members = False
if batch:
state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
else:
# We can get here if the user has ignored the senders of all
# the recent events.
state_at_timeline_start = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
if batch.limited:
# for now, we disable LL for gappy syncs - see
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
# N.B. this slows down incr syncs as we are now processing way
# more state in the server than if we were LLing.
#
# We still have to filter timeline_start to LL entries (above) in order
# for _calculate_state's LL logic to work, as we have to include LL
# members for timeline senders in case they weren't loaded in the initial
# sync. We do this (counterintuitively) by filtering timeline_start
# members to just be ones which were timeline senders, which then ensures
# all of the rest get included in the state block (if we need to know
# about them).
state_filter = StateFilter.all()
state_at_previous_sync = await self.get_state_at(
room_id,
stream_position=since_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
timeline_end=state_at_timeline_end,
previous_timeline_end=state_at_previous_sync,
lazy_load_members=lazy_load_members,
)
return state_ids
async def _find_missing_partial_state_memberships(
self,
room_id: str,
@@ -1404,9 +1333,9 @@ class SyncHandler:
and auth_event.state_key == member
):
missing_members.discard(member)
additional_state_ids[(EventTypes.Member, member)] = (
auth_event.event_id
)
additional_state_ids[
(EventTypes.Member, member)
] = auth_event.event_id
break
if missing_members:
@@ -2314,7 +2243,6 @@ class SyncHandler:
full_state=False,
since_token=since_token,
upto_token=leave_token,
end_token=leave_token,
out_of_band=leave_event.internal_metadata.is_out_of_band_membership(),
)
)
@@ -2352,7 +2280,6 @@ class SyncHandler:
full_state=False,
since_token=None if newly_joined else since_token,
upto_token=prev_batch_token,
end_token=now_token,
)
else:
entry = RoomSyncResultBuilder(
@@ -2363,7 +2290,6 @@ class SyncHandler:
full_state=False,
since_token=since_token,
upto_token=since_token,
end_token=now_token,
)
room_entries.append(entry)
@@ -2422,7 +2348,6 @@ class SyncHandler:
full_state=True,
since_token=since_token,
upto_token=now_token,
end_token=now_token,
)
)
elif event.membership == Membership.INVITE:
@@ -2452,7 +2377,6 @@ class SyncHandler:
full_state=True,
since_token=since_token,
upto_token=leave_token,
end_token=leave_token,
)
)
@@ -2523,7 +2447,6 @@ class SyncHandler:
{
"since_token": since_token,
"upto_token": upto_token,
"end_token": room_builder.end_token,
}
)
@@ -2597,7 +2520,7 @@ class SyncHandler:
batch,
sync_config,
since_token,
room_builder.end_token,
now_token,
full_state=full_state,
)
else:
@@ -2757,61 +2680,6 @@ def _calculate_state(
e for t, e in timeline_start.items() if t[0] == EventTypes.Member
)
# Naively, we would just return the difference between the state at the start
# of the timeline (`timeline_start_ids`) and that at the end of the previous sync
# (`previous_timeline_end_ids`). However, that fails in the presence of forks in
# the DAG.
#
# For example, consider a DAG such as the following:
#
#        E1
#      ↗    ↖
#     |      S2
#     |      ↑
#   --|------|----
#     |      |
#     E3     |
#      ↖    /
#        E4
#
# ... and a filter that means we only return 2 events, represented by the dashed
# horizontal line. Assuming S2 was *not* included in the previous sync, we need to
# include it in the `state` section.
#
# Note that the state at the start of the timeline (E3) does not include S2. So,
# to make sure it gets included in the calculation here, we actually look at
# the state at the *end* of the timeline, and subtract any events that are present
# in the timeline.
#
# ----------
#
# Aside 1: You may then wonder if we need to include `timeline_start` in the
# calculation. Consider a linear DAG:
#
#       E1
#       ↑
#       S2
#       ↑
#   ----|------
#       |
#       E3
#       ↑
#       S4
#       ↑
#       E5
#
# ... where S2 and S4 change the same piece of state; and where we have a filter
# that returns 3 events (E3, S4, E5). We still need to tell the client about S2,
# because it might affect the display of E3. However, the state at the end of the
# timeline only tells us about S4; if we don't inspect `timeline_start` we won't
# find out about S2.
#
# (There are yet more complicated cases in which a state event is excluded from the
# timeline, but whose effect actually lands in the DAG in the *middle* of the
# timeline. We have no way to represent that in the /sync response, and we don't
# even try; it is either omitted or plonked into `state` as if it were at the start
# of the timeline, depending on what else is in the timeline.)
state_ids = (
(timeline_end_ids | timeline_start_ids)
- previous_timeline_end_ids
@@ -2878,7 +2746,7 @@ class SyncResultBuilder:
if self.since_token:
for joined_sync in self.joined:
it = itertools.chain(
joined_sync.state.values(), joined_sync.timeline.events
joined_sync.timeline.events, joined_sync.state.values()
)
for event in it:
if event.type == EventTypes.Member:
@@ -2890,20 +2758,13 @@ class SyncResultBuilder:
newly_joined_or_invited_or_knocked_users.add(
event.state_key
)
# If the user left and rejoined in the same batch, they
# count as a newly-joined user, *not* a newly-left user.
newly_left_users.discard(event.state_key)
else:
prev_content = event.unsigned.get("prev_content", {})
prev_membership = prev_content.get("membership", None)
if prev_membership == Membership.JOIN:
newly_left_users.add(event.state_key)
# If the user joined and left in the same batch, they
# count as a newly-left user, not a newly-joined user.
newly_joined_or_invited_or_knocked_users.discard(
event.state_key
)
newly_left_users -= newly_joined_or_invited_or_knocked_users
return newly_joined_or_invited_or_knocked_users, newly_left_users
@@ -2914,30 +2775,13 @@ class RoomSyncResultBuilder:
Attributes:
room_id
rtype: One of `"joined"` or `"archived"`
events: List of events to include in the room (more events may be added
when generating result).
newly_joined: If the user has newly joined the room
full_state: Whether the full state should be sent in result
since_token: Earliest point to return events from, or None
upto_token: Latest point to return events from. If `events` is populated,
this is set to the token at the start of `events`
end_token: The last point in the timeline that the client should see events
from. Normally this will be the same as the global `now_token`, but in
the case of rooms where the user has left the room, this will be the point
just after their leave event.
This is used in the calculation of the state which is returned in `state`:
any state changes *up to* `end_token` (and not beyond!) which are not
reflected in the timeline need to be returned in `state`.
upto_token: Latest point to return events from.
out_of_band: whether the events in the room are "out of band" events
and the server isn't in the room.
"""
@@ -2949,5 +2793,5 @@ class RoomSyncResultBuilder:
full_state: bool
since_token: Optional[StreamToken]
upto_token: StreamToken
end_token: StreamToken
out_of_band: bool = False
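The DAG commentary in this hunk boils down to one set expression: return the union of the state at the timeline end and the timeline start, minus whatever the previous sync already covered and whatever the timeline itself contains. A toy check of the forked-DAG example, with bare event IDs standing in for the (type, state_key) -> event_id maps:

# State at E3 (timeline start) does not include S2; state at E4 (timeline end)
# does; the previous sync ended before the fork, so it never sent S2 either.
timeline_start_ids = {"$E1"}
timeline_end_ids = {"$E1", "$S2"}
previous_timeline_end_ids = {"$E1"}
timeline_contains = set()  # S2 is state, not one of the returned timeline events

state_ids = (
    (timeline_end_ids | timeline_start_ids)
    - previous_timeline_end_ids
    - timeline_contains
)
assert state_ids == {"$S2"}  # S2 lands in the `state` section, as intended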

View File

@@ -182,15 +182,12 @@ class WorkerLocksHandler:
if not locks:
return
def _wake_all_locks(
locks: Collection[Union[WaitingLock, WaitingMultiLock]]
) -> None:
for lock in locks:
deferred = lock.deferred
if not deferred.called:
deferred.callback(None)
def _wake_deferred(deferred: defer.Deferred) -> None:
if not deferred.called:
deferred.callback(None)
self._clock.call_later(0, _wake_all_locks, locks)
for lock in locks:
self._clock.call_later(0, _wake_deferred, lock.deferred)
@wrap_as_background_process("_cleanup_locks")
async def _cleanup_locks(self) -> None:

View File

@@ -390,13 +390,6 @@ class BaseHttpClient:
cooperator=self._cooperator,
)
# Always make sure we add a user agent to the request
if headers is None:
headers = Headers()
if not headers.hasHeader("User-Agent"):
headers.addRawHeader("User-Agent", self.user_agent)
request_deferred: defer.Deferred = treq.request(
method,
uri,

View File

@@ -931,7 +931,8 @@ class MatrixFederationHttpClient:
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
backoff_on_all_error_codes: bool = False,
) -> JsonDict: ...
) -> JsonDict:
...
@overload
async def put_json(
@@ -948,7 +949,8 @@ class MatrixFederationHttpClient:
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser[T]] = None,
backoff_on_all_error_codes: bool = False,
) -> T: ...
) -> T:
...
async def put_json(
self,
@@ -1138,7 +1140,8 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
) -> JsonDict: ...
) -> JsonDict:
...
@overload
async def get_json(
@@ -1151,7 +1154,8 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = ...,
try_trailing_slash_on_400: bool = ...,
parser: ByteParser[T] = ...,
) -> T: ...
) -> T:
...
async def get_json(
self,
@@ -1232,7 +1236,8 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]: ...
) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]:
...
@overload
async def get_json_with_headers(
@@ -1245,7 +1250,8 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = ...,
try_trailing_slash_on_400: bool = ...,
parser: ByteParser[T] = ...,
) -> Tuple[T, Dict[bytes, List[bytes]]]: ...
) -> Tuple[T, Dict[bytes, List[bytes]]]:
...
async def get_json_with_headers(
self,

View File

@@ -61,17 +61,20 @@ logger = logging.getLogger(__name__)
@overload
def parse_integer(request: Request, name: str, default: int) -> int: ...
def parse_integer(request: Request, name: str, default: int) -> int:
...
@overload
def parse_integer(request: Request, name: str, *, required: Literal[True]) -> int: ...
def parse_integer(request: Request, name: str, *, required: Literal[True]) -> int:
...
@overload
def parse_integer(
request: Request, name: str, default: Optional[int] = None, required: bool = False
) -> Optional[int]: ...
) -> Optional[int]:
...
def parse_integer(
@@ -102,7 +105,8 @@ def parse_integer_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[int] = None,
) -> Optional[int]: ...
) -> Optional[int]:
...
@overload
@@ -111,7 +115,8 @@ def parse_integer_from_args(
name: str,
*,
required: Literal[True],
) -> int: ...
) -> int:
...
@overload
@@ -120,7 +125,8 @@ def parse_integer_from_args(
name: str,
default: Optional[int] = None,
required: bool = False,
) -> Optional[int]: ...
) -> Optional[int]:
...
def parse_integer_from_args(
@@ -166,17 +172,20 @@ def parse_integer_from_args(
@overload
def parse_boolean(request: Request, name: str, default: bool) -> bool: ...
def parse_boolean(request: Request, name: str, default: bool) -> bool:
...
@overload
def parse_boolean(request: Request, name: str, *, required: Literal[True]) -> bool: ...
def parse_boolean(request: Request, name: str, *, required: Literal[True]) -> bool:
...
@overload
def parse_boolean(
request: Request, name: str, default: Optional[bool] = None, required: bool = False
) -> Optional[bool]: ...
) -> Optional[bool]:
...
def parse_boolean(
@@ -207,7 +216,8 @@ def parse_boolean_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: bool,
) -> bool: ...
) -> bool:
...
@overload
@@ -216,7 +226,8 @@ def parse_boolean_from_args(
name: str,
*,
required: Literal[True],
) -> bool: ...
) -> bool:
...
@overload
@@ -225,7 +236,8 @@ def parse_boolean_from_args(
name: str,
default: Optional[bool] = None,
required: bool = False,
) -> Optional[bool]: ...
) -> Optional[bool]:
...
def parse_boolean_from_args(
@@ -277,7 +289,8 @@ def parse_bytes_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[bytes] = None,
) -> Optional[bytes]: ...
) -> Optional[bytes]:
...
@overload
@@ -287,7 +300,8 @@ def parse_bytes_from_args(
default: Literal[None] = None,
*,
required: Literal[True],
) -> bytes: ...
) -> bytes:
...
@overload
@@ -296,7 +310,8 @@ def parse_bytes_from_args(
name: str,
default: Optional[bytes] = None,
required: bool = False,
) -> Optional[bytes]: ...
) -> Optional[bytes]:
...
def parse_bytes_from_args(
@@ -340,7 +355,8 @@ def parse_string(
*,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> str: ...
) -> str:
...
@overload
@@ -351,7 +367,8 @@ def parse_string(
required: Literal[True],
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> str: ...
) -> str:
...
@overload
@@ -363,7 +380,8 @@ def parse_string(
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[str]: ...
) -> Optional[str]:
...
def parse_string(
@@ -419,7 +437,8 @@ def parse_enum(
name: str,
E: Type[EnumT],
default: EnumT,
) -> EnumT: ...
) -> EnumT:
...
@overload
@@ -429,7 +448,8 @@ def parse_enum(
E: Type[EnumT],
*,
required: Literal[True],
) -> EnumT: ...
) -> EnumT:
...
def parse_enum(
@@ -506,7 +526,8 @@ def parse_strings_from_args(
*,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[List[str]]: ...
) -> Optional[List[str]]:
...
@overload
@@ -517,7 +538,8 @@ def parse_strings_from_args(
*,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> List[str]: ...
) -> List[str]:
...
@overload
@@ -528,7 +550,8 @@ def parse_strings_from_args(
required: Literal[True],
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> List[str]: ...
) -> List[str]:
...
@overload
@@ -540,7 +563,8 @@ def parse_strings_from_args(
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[List[str]]: ...
) -> Optional[List[str]]:
...
def parse_strings_from_args(
@@ -601,7 +625,8 @@ def parse_string_from_args(
*,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[str]: ...
) -> Optional[str]:
...
@overload
@@ -613,7 +638,8 @@ def parse_string_from_args(
required: Literal[True],
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> str: ...
) -> str:
...
@overload
@@ -624,7 +650,8 @@ def parse_string_from_args(
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[str]: ...
) -> Optional[str]:
...
def parse_string_from_args(
@@ -677,19 +704,22 @@ def parse_string_from_args(
@overload
def parse_json_value_from_request(request: Request) -> JsonDict: ...
def parse_json_value_from_request(request: Request) -> JsonDict:
...
@overload
def parse_json_value_from_request(
request: Request, allow_empty_body: Literal[False]
) -> JsonDict: ...
) -> JsonDict:
...
@overload
def parse_json_value_from_request(
request: Request, allow_empty_body: bool = False
) -> Optional[JsonDict]: ...
) -> Optional[JsonDict]:
...
def parse_json_value_from_request(
@@ -817,6 +847,7 @@ def assert_params_in_dict(body: JsonDict, required: StrCollection) -> None:
class RestServlet:
"""A Synapse REST Servlet.
An implementing class can either provide its own custom 'register' method,
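Most hunks in this file (and in several files below) only move the `...` body of @overload stubs between its own line and the signature line. For reference, a small self-contained example of the pattern those stubs implement, using a hypothetical parse_flag helper rather than Synapse's parsers:

from typing import Literal, Optional, overload

@overload
def parse_flag(value: str, *, required: Literal[True]) -> bool: ...
@overload
def parse_flag(value: str, *, required: bool = False) -> Optional[bool]: ...
def parse_flag(value: str, *, required: bool = False) -> Optional[bool]:
    # The overloads narrow the return type for callers passing required=True;
    # this single runtime implementation handles both cases.
    if value in ("true", "false"):
        return value == "true"
    if required:
        raise ValueError("expected 'true' or 'false'")
    return None

assert parse_flag("true", required=True) is True
assert parse_flag("maybe") is None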

View File

@@ -744,7 +744,8 @@ def preserve_fn(
@overload
def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]: ...
def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]:
...
def preserve_fn(
@@ -773,10 +774,15 @@ def run_in_background(
@overload
def run_in_background(
f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]": ...
) -> "defer.Deferred[R]":
...
def run_in_background(
def run_in_background( # type: ignore[misc]
# The `type: ignore[misc]` above suppresses
# "Overloaded function implementation does not accept all possible arguments of signature 1"
# "Overloaded function implementation does not accept all possible arguments of signature 2"
# which seems like a bug in mypy.
f: Union[
Callable[P, R],
Callable[P, Awaitable[R]],

View File

@@ -388,13 +388,15 @@ def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]:
@overload
def ensure_active_span(
message: str,
) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: ...
) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]:
...
@overload
def ensure_active_span(
message: str, ret: T
) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: ...
) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]:
...
def ensure_active_span(

View File

@@ -1002,9 +1002,9 @@ class MediaRepository:
)
t_width = min(m_width, t_width)
t_height = min(m_height, t_height)
thumbnails[(t_width, t_height, requirement.media_type)] = (
requirement.method
)
thumbnails[
(t_width, t_height, requirement.media_type)
] = requirement.method
# Now we generate the thumbnails for each dimension, store it
for (t_width, t_height, t_type), t_method in thumbnails.items():

View File

@@ -256,11 +256,12 @@ def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) ->
parser = etree.HTMLParser(recover=True, encoding="utf-8")
# Attempt to parse the body. If this fails, log and return no metadata.
tree = etree.fromstring(html_body, parser)
# TODO The develop branch of lxml-stubs has this correct.
tree = etree.fromstring(html_body, parser) # type: ignore[arg-type]
# The data was successfully parsed, but no tree was found.
if tree is None:
return
return # type: ignore[unreachable]
# Attempt to find interesting URLs (images, videos, embeds).
if "og:image" not in open_graph_response:

View File

@@ -160,7 +160,8 @@ def decode_body(
# Attempt to parse the body. Returns None if the body was successfully
# parsed, but no tree was found.
return etree.fromstring(body, parser)
# TODO The develop branch of lxml-stubs has this correct.
return etree.fromstring(body, parser) # type: ignore[arg-type]
def _get_meta_tags(

View File

@@ -42,12 +42,14 @@ class JemallocStats:
@overload
def _mallctl(
self, name: str, read: Literal[True] = True, write: Optional[int] = None
) -> int: ...
) -> int:
...
@overload
def _mallctl(
self, name: str, read: Literal[False], write: Optional[int] = None
) -> None: ...
) -> None:
...
def _mallctl(
self, name: str, read: bool = True, write: Optional[int] = None

View File

@@ -455,7 +455,7 @@ class SpamCheckerModuleApiCallbacks:
# mypy complains that we can't reach this code because of the
# return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know
# for sure that the module actually returns it.
logger.warning( # type: ignore[unreachable]
logger.warning(
"Module returned invalid value, rejecting message as spam"
)
res = "This message has been rejected as probable spam"

View File

@@ -366,7 +366,7 @@ class ThirdPartyEventRulesModuleApiCallbacks:
if len(self._check_threepid_can_be_invited_callbacks) == 0:
return True
state_events = await self._storage_controllers.state.get_current_state(room_id)
state_events = await self._get_state_map_for_room(room_id)
for callback in self._check_threepid_can_be_invited_callbacks:
try:
@@ -399,7 +399,7 @@ class ThirdPartyEventRulesModuleApiCallbacks:
if len(self._check_visibility_can_be_modified_callbacks) == 0:
return True
state_events = await self._storage_controllers.state.get_current_state(room_id)
state_events = await self._get_state_map_for_room(room_id)
for callback in self._check_visibility_can_be_modified_callbacks:
try:
@@ -427,13 +427,7 @@ class ThirdPartyEventRulesModuleApiCallbacks:
return
event = await self.store.get_event(event_id)
# We *don't* want to wait for the full state here, because waiting for full
# state will persist the event, which in turn will call this method.
# This would end up in a deadlock.
state_events = await self._storage_controllers.state.get_current_state(
event.room_id, await_full_state=False
)
state_events = await self._get_state_map_for_room(event.room_id)
for callback in self._on_new_event_callbacks:
try:
@@ -496,6 +490,17 @@ class ThirdPartyEventRulesModuleApiCallbacks:
)
return True
async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
"""Given a room ID, return the state events of that room.
Args:
room_id: The ID of the room.
Returns:
A dict mapping (event type, state key) to state event.
"""
return await self._storage_controllers.state.get_current_state(room_id)
async def on_profile_update(
self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
) -> None:

View File

@@ -469,7 +469,8 @@ class Notifier:
new_token: RoomStreamToken,
users: Optional[Collection[Union[str, UserID]]] = None,
rooms: Optional[StrCollection] = None,
) -> None: ...
) -> None:
...
@overload
def on_new_event(
@@ -478,7 +479,8 @@ class Notifier:
new_token: MultiWriterStreamToken,
users: Optional[Collection[Union[str, UserID]]] = None,
rooms: Optional[StrCollection] = None,
) -> None: ...
) -> None:
...
@overload
def on_new_event(
@@ -495,7 +497,8 @@ class Notifier:
new_token: int,
users: Optional[Collection[Union[str, UserID]]] = None,
rooms: Optional[StrCollection] = None,
) -> None: ...
) -> None:
...
def on_new_event(
self,

View File

@@ -26,7 +26,6 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, TypeVar
import bleach
import jinja2
from markupsafe import Markup
from prometheus_client import Counter
from synapse.api.constants import EventTypes, Membership, RoomTypes
from synapse.api.errors import StoreError
@@ -57,12 +56,6 @@ logger = logging.getLogger(__name__)
T = TypeVar("T")
emails_sent_counter = Counter(
"synapse_emails_sent_total",
"Emails sent by type",
["type"],
)
CONTEXT_BEFORE = 1
CONTEXT_AFTER = 1
@@ -137,8 +130,6 @@ class Mailer:
logger.info("Created Mailer for app_name %s" % app_name)
emails_sent_counter.labels("password_reset")
async def send_password_reset_mail(
self, email_address: str, token: str, client_secret: str, sid: str
) -> None:
@@ -162,8 +153,6 @@ class Mailer:
template_vars: TemplateVars = {"link": link}
emails_sent_counter.labels("password_reset").inc()
await self.send_email(
email_address,
self.email_subjects.password_reset
@@ -171,8 +160,6 @@ class Mailer:
template_vars,
)
emails_sent_counter.labels("registration")
async def send_registration_mail(
self, email_address: str, token: str, client_secret: str, sid: str
) -> None:
@@ -196,8 +183,6 @@ class Mailer:
template_vars: TemplateVars = {"link": link}
emails_sent_counter.labels("registration").inc()
await self.send_email(
email_address,
self.email_subjects.email_validation
@@ -205,8 +190,6 @@ class Mailer:
template_vars,
)
emails_sent_counter.labels("add_threepid")
async def send_add_threepid_mail(
self, email_address: str, token: str, client_secret: str, sid: str
) -> None:
@@ -231,8 +214,6 @@ class Mailer:
template_vars: TemplateVars = {"link": link}
emails_sent_counter.labels("add_threepid").inc()
await self.send_email(
email_address,
self.email_subjects.email_validation
@@ -240,8 +221,6 @@ class Mailer:
template_vars,
)
emails_sent_counter.labels("notification")
async def send_notification_mail(
self,
app_id: str,
@@ -336,8 +315,6 @@ class Mailer:
"reason": reason,
}
emails_sent_counter.labels("notification").inc()
await self.send_email(
email_address, summary_text, template_vars, unsubscribe_link
)
@@ -377,14 +354,12 @@ class Mailer:
#
# Note that many email clients will not render the unsubscribe link
# unless DKIM, etc. is properly setup.
additional_headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{unsubscribe_link}>",
}
if unsubscribe_link
else None
),
additional_headers={
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{unsubscribe_link}>",
}
if unsubscribe_link
else None,
)
async def _get_room_vars(

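The Mailer hunks revolve around a labelled Prometheus counter: each label value is touched once at construction time so the series is exported at zero, then incremented when the corresponding email is actually sent. A standalone sketch of that pattern, reusing the metric definition from the diff:

from prometheus_client import Counter

emails_sent_counter = Counter(
    "synapse_emails_sent_total",
    "Emails sent by type",
    ["type"],
)

# Touch the label up front so the series shows up at 0 before any send.
emails_sent_counter.labels("password_reset")


def record_password_reset_sent() -> None:
    emails_sent_counter.labels("password_reset").inc()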
View File

@@ -29,17 +29,11 @@ from synapse.storage.databases.main import DataStore
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
invites = await store.get_invited_rooms_for_local_user(user_id)
joins = await store.get_rooms_for_user(user_id)
badge = len(invites)
room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
for room_id, notify_count in room_to_count.items():
# room_to_count may include rooms which the user has left,
# ignore those.
if room_id not in joins:
continue
for _room_id, notify_count in room_to_count.items():
if notify_count == 0:
continue

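The badge logic shown here adds one entry per invite and then folds in per-room unread counts, skipping rooms the user is no longer joined to (hence the joins check in one side of this hunk). A rough, self-contained sketch of that arithmetic, with the group_by_room handling assumed from the surrounding function:

from typing import Mapping, Set


def badge_count(
    num_invites: int,
    joined_rooms: Set[str],
    unread_by_room: Mapping[str, int],
    group_by_room: bool,
) -> int:
    badge = num_invites
    for room_id, notify_count in unread_by_room.items():
        # Counts may include rooms the user has since left; ignore those.
        if room_id not in joined_rooms or notify_count == 0:
            continue
        # Either count each noisy room once, or sum the notifications.
        badge += 1 if group_by_room else notify_count
    return badge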
View File

@@ -259,9 +259,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
url_args.append(txn_id)
if cls.METHOD == "POST":
request_func: Callable[..., Awaitable[Any]] = (
client.post_json_get_json
)
request_func: Callable[
..., Awaitable[Any]
] = client.post_json_get_json
elif cls.METHOD == "PUT":
request_func = client.put_json
elif cls.METHOD == "GET":

View File

@@ -77,46 +77,5 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
return 200, {}
class ReplicationCopyPusherRestServlet(ReplicationEndpoint):
"""Copies push rules from an old room to new room.
Request format:
POST /_synapse/replication/copy_push_rules/:user_id/:old_room_id/:new_room_id
{}
"""
NAME = "copy_push_rules"
PATH_ARGS = ("user_id", "old_room_id", "new_room_id")
CACHE = False
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._store = hs.get_datastores().main
@staticmethod
async def _serialize_payload(user_id: str, old_room_id: str, new_room_id: str) -> JsonDict: # type: ignore[override]
return {}
async def _handle_request( # type: ignore[override]
self,
request: Request,
content: JsonDict,
user_id: str,
old_room_id: str,
new_room_id: str,
) -> Tuple[int, JsonDict]:
await self._store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
return 200, {}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationRemovePusherRestServlet(hs).register(http_server)
ReplicationCopyPusherRestServlet(hs).register(http_server)

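For completeness, a hedged sketch of how a worker might invoke the copy_push_rules endpoint defined above, assuming the usual ReplicationEndpoint.make_client(hs) pattern; the import path and keyword handling are inferred from the file shown and may differ slightly in practice.

from typing import TYPE_CHECKING

from synapse.replication.http.push import ReplicationCopyPusherRestServlet

if TYPE_CHECKING:
    from synapse.server import HomeServer


async def copy_rules(
    hs: "HomeServer", user_id: str, old_room_id: str, new_room_id: str
) -> None:
    # make_client() returns an async callable that issues the POST described
    # in the servlet's docstring (instance routing defaults are assumed here).
    client = ReplicationCopyPusherRestServlet.make_client(hs)
    await client(user_id=user_id, old_room_id=old_room_id, new_room_id=new_room_id)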
View File

@@ -70,9 +70,9 @@ class ExternalCache:
def __init__(self, hs: "HomeServer"):
if hs.config.redis.redis_enabled:
self._redis_connection: Optional["ConnectionHandler"] = (
hs.get_outbound_redis_connection()
)
self._redis_connection: Optional[
"ConnectionHandler"
] = hs.get_outbound_redis_connection()
else:
self._redis_connection = None

View File

@@ -66,7 +66,6 @@ from synapse.replication.tcp.streams import (
FederationStream,
PresenceFederationStream,
PresenceStream,
PushRulesStream,
ReceiptsStream,
Stream,
ToDeviceStream,
@@ -179,12 +178,6 @@ class ReplicationCommandHandler:
continue
if isinstance(stream, PushRulesStream):
if hs.get_instance_name() in hs.config.worker.writers.push_rules:
self._streams_to_replicate.append(stream)
continue
# Only add any other streams if we're on master.
if hs.config.worker.worker_app is not None:
continue

View File

@@ -109,7 +109,6 @@ from synapse.rest.admin.users import (
UserReplaceMasterCrossSigningKeyRestServlet,
UserRestServletV2,
UsersRestServletV2,
UsersRestServletV3,
UserTokenRestServlet,
WhoisRestServlet,
)
@@ -237,12 +236,10 @@ class PurgeHistoryStatusRestServlet(RestServlet):
raise NotFoundError("purge id '%s' not found" % purge_id)
result: JsonDict = {
"status": (
purge_task.status
if purge_task.status == TaskStatus.COMPLETE
or purge_task.status == TaskStatus.FAILED
else "active"
),
"status": purge_task.status
if purge_task.status == TaskStatus.COMPLETE
or purge_task.status == TaskStatus.FAILED
else "active",
}
if purge_task.error:
result["error"] = purge_task.error
@@ -292,7 +289,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server)
UsersRestServletV3(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server)
LargestRoomsStatistics(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server)

Some files were not shown because too many files have changed in this diff.