mirror of https://github.com/element-hq/synapse.git
synced 2025-12-07 01:20:16 +00:00

Compare commits
2 Commits: erikj/devi ... quenting/r

| Author | SHA1 | Date |
|---|---|---|
| | 08f16646a2 | |
| | b86deec62a | |
.github/ISSUE_TEMPLATE.md (vendored, 2 changed lines)

@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
 
-**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
+**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ body:
 **THIS IS NOT A SUPPORT CHANNEL!**
 **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
 
-If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
+If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
 
 This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
.github/workflows/docker.yml (vendored, 2 changed lines)

@@ -72,7 +72,7 @@ jobs:
 
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@v5
         with:
           push: true
           labels: |
.github/workflows/docs-pr-netlify.yaml (vendored, 2 changed lines)

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
+        uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
.github/workflows/release-artifacts.yml (vendored, 8 changed lines)

@@ -102,7 +102,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-12]
+        os: [ubuntu-20.04, macos-11]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,9 +112,9 @@ jobs:
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-12"
+            os: "macos-11"
           # Don't build aarch64 wheels on mac.
-          - os: "macos-12"
+          - os: "macos-11"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
@@ -130,7 +130,7 @@ jobs:
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.19.1
+        run: python -m pip install cibuildwheel==2.16.2
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
.github/workflows/tests.yml (vendored, 3 changed lines)

@@ -479,9 +479,6 @@ jobs:
       volumes:
         - ${{ github.workspace }}:/src
       env:
-        # If this is a pull request to a release branch, use that branch as default branch for sytest, else use develop
-        # This works because the release script always create a branch on the sytest repo with the same name as the release branch
-        SYTEST_DEFAULT_BRANCH: ${{ startsWith(github.base_ref, 'release-') && github.base_ref || 'develop' }}
        SYTEST_BRANCH: ${{ github.head_ref }}
        POSTGRES: ${{ matrix.job.postgres && 1}}
        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
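A note on the removed `SYTEST_DEFAULT_BRANCH` expression: GitHub Actions expressions have no ternary operator, so the `cond && a || b` pattern is the conventional stand-in for "if cond then a, else b"; here it selects the PR's base branch for `release-*` pull requests and falls back to `develop` otherwise.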
CHANGES.md (22 changed lines)

@@ -1,25 +1,3 @@
-# Synapse 1.109.0 (2024-06-18)
-
-### Internal Changes
-
-- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))
-
-
-
-
-# Synapse 1.109.0rc3 (2024-06-17)
-
-### Bugfixes
-
-- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309))
-
-### Internal Changes
-
-- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306))
-
-
-
-
 # Synapse 1.109.0rc2 (2024-06-11)
 
 ### Bugfixes
Cargo.lock (generated, 4 changed lines)

@@ -212,9 +212,9 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.5.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
README.rst (71 changed lines)

@@ -1,34 +1,21 @@
-.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
-   :height: 60px
+=========================================================================
+Synapse |support| |development| |documentation| |license| |pypi| |python|
+=========================================================================
 
-**Element Synapse - Matrix homeserver implementation**
+Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
+maintained by the Matrix.org Foundation. We began rapid development in 2014,
+reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
+in earnest today.
 
-|support| |development| |documentation| |license| |pypi| |python|
-
-Synapse is an open source `Matrix <https://matrix.org>`_ homeserver
-implementation, written and maintained by `Element <https://element.io>`_.
-`Matrix <https://github.com/matrix-org>`_ is the open standard for
-secure and interoperable real time communications. You can directly run
-and manage the source code in this repository, available under an AGPL
-license. There is no support provided from Element unless you have a
-subscription.
-
-Subscription alternative
-========================
-
-Alternatively, for those that need an enterprise-ready solution, Element
-Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
-ESS builds on Synapse to offer a complete Matrix-based backend including the full
-`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
-giving admins the power to easily manage an organization-wide
-deployment. It includes advanced identity management, auditing,
-moderation and data retention options as well as Long Term Support and
-SLAs. ESS can be used to support any Matrix-based frontend client.
+Briefly, Matrix is an open standard for communications on the internet, supporting
+federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
+Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
+<https://spec.matrix.org/>`_ describes the technical details.
 
 .. contents::
 
-🛠️ Installing and configuration
-===============================
+Installing and configuration
+============================
 
 The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
 `Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org

@@ -118,8 +105,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
 impact to other applications will be minimal.
 
 
-🧪 Testing a new installation
-============================
+Testing a new installation
+==========================
 
 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.

@@ -172,20 +159,8 @@ the form of::
 As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.
 
-🎯 Troubleshooting and support
-=============================
-
-🚀 Professional support
-----------------------
-
-Enterprise quality support for Synapse including SLAs is available as part of an
-`Element Server Suite (ESS) <https://element.io/pricing>` subscription.
-
-If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`
-and access the `knowledge base <https://ems-docs.element.io>`.
-
-🤝 Community support
--------------------
+Troubleshooting and support
+===========================
 
 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see

@@ -201,8 +176,8 @@ issues for support requests, only for bug reports and feature requests.
 .. |docs| replace:: ``docs``
 .. _docs: docs
 
-🪪 Identity Servers
-==================
+Identity Servers
+================
 
 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs

@@ -231,8 +206,8 @@ an email address with your account, or send an invite to another user via their
 email address.
 
 
-🛠️ Development
-==============
+Development
+===========
 
 We welcome contributions to Synapse from the community!
 The best place to get started is our

@@ -250,8 +225,8 @@ Alongside all that, join our developer community on Matrix:
 `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
 
 
-.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
-   :alt: (get community support in #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
+   :alt: (get support on #synapse:matrix.org)
    :target: https://matrix.to/#/#synapse:matrix.org
 
 .. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
@@ -1,2 +1,2 @@
 Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
-by adding a federation /download endpoint.
+by adding a federation /download endpoint (#17172).

@@ -1 +0,0 @@
-Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes.

@@ -1 +0,0 @@
-Add support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension.

@@ -1 +0,0 @@
-Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api).

@@ -1 +0,0 @@
-Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

@@ -1 +0,0 @@
-Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

@@ -1 +0,0 @@
-Include user membership in events served to clients, per MSC4115.

@@ -1 +0,0 @@
-Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error.

@@ -1 +0,0 @@
-Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.

@@ -1 +0,0 @@
-Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --password-file flag, which
-is useful for scripting.

@@ -1 +0,0 @@
-Fix edge case in `/sync` returning the wrong state when using sharded event persisters.

@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.

@@ -1 +0,0 @@
-Bump `mypy` from 1.8.0 to 1.9.0.

@@ -1 +0,0 @@
-Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.

@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
-This is useful for scripts that bootstrap user accounts with initial passwords.

@@ -1 +0,0 @@
-Add missing quotes for example for `exclude_rooms_from_sync`.
changelog.d/17318.misc (new file, 1 changed line)

@@ -0,0 +1 @@
+Make the release script create a release branch for Complement as well.
@@ -1 +0,0 @@
-Add support for via query parameter from MSC415.

@@ -1 +0,0 @@
-Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering.

@@ -1 +0,0 @@
-This is a changelog so tests will run.

@@ -1 +0,0 @@
-Update header in the README to visually fix the auto-generated table of contents.

@@ -1 +0,0 @@
-Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC.

@@ -1 +0,0 @@
-Handle device lists notifications for large accounts more efficiently in worker mode.

@@ -1 +0,0 @@
-Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

@@ -1 +0,0 @@
-Fix email notification subject when invited to a space.

@@ -1 +0,0 @@
-Do not block event sending/receiving while calculating large event auth chains.

@@ -1 +0,0 @@
-Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak).

@@ -1 +0,0 @@
-Fix stale references to the Foundation's Security Disclosure Policy.

@@ -1 +0,0 @@
-Add default values for `rc_invites.per_issuer` to docs.

@@ -1 +0,0 @@
-Fix an error in the docs for `search_all_users` parameter under `user_directory`.

@@ -1 +0,0 @@
-Handle device lists notifications for large accounts more efficiently in worker mode.

@@ -1 +0,0 @@
-Fix rare race which causes no new to-device messages to be received from remote server.
debian/changelog (vendored, 18 changed lines)

@@ -1,21 +1,3 @@
-matrix-synapse-py3 (1.109.0+nmu1) UNRELEASED; urgency=medium
-
-  * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 13:29:36 +0100
-
-matrix-synapse-py3 (1.109.0) stable; urgency=medium
-
-  * New synapse release 1.109.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 09:45:15 +0000
-
-matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
-
-  * New synapse release 1.109.0rc3.
-
- -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Jun 2024 12:05:24 +0000
-
 matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
 
   * New synapse release 1.109.0rc2.
debian/register_new_matrix_user.ronn (vendored, 11 changed lines)

@@ -31,12 +31,8 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
     Local part of the new user. Will prompt if omitted.
 
   * `-p`, `--password`:
-    New password for user. Will prompt if this option and `--password-file` are omitted.
-    Supplying the password on the command line is not recommended.
-
-  * `--password-file`:
-    File containing the new password for user. If set, overrides `--password`.
-    This is a more secure alternative to specifying the password on the command line.
+    New password for user. Will prompt if omitted. Supplying the password
+    on the command line is not recommended. Use the STDIN instead.
 
   * `-a`, `--admin`:
     Register new user as an admin. Will prompt if omitted.

@@ -48,9 +44,6 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
     Shared secret as defined in server config file. This is an optional
     parameter as it can be also supplied via the YAML file.
 
-  * `--exists-ok`:
-    Do not fail if the user already exists. The user account will be not updated in this case.
-
   * `server_url`:
     URL of the home server. Defaults to 'https://localhost:8448'.
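For context, the "sample YAML file" named in the hunk headers above is the config from which `register_new_matrix_user` can read the shared secret. A minimal sketch, with a placeholder value that is not taken from the diff:

```yaml
# Minimal YAML accepted by register_new_matrix_user (illustrative sketch).
# The value must match registration_shared_secret in the server's homeserver.yaml.
registration_shared_secret: "YOUR_SHARED_SECRET"
```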
@@ -105,6 +105,8 @@ experimental_features:
   # Expose a room summary for public rooms
   msc3266_enabled: true
 
+  msc4115_membership_on_events: true
+
 server_notices:
   system_mxid_localpart: _server
   system_mxid_display_name: "Server Alert"
@@ -176,6 +176,7 @@ app_service_config_files:
 {% endif %}
 
 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
+expire_access_token: False
 
 ## Signing Keys ##
@@ -36,10 +36,6 @@ The following query parameters are available:
   - the room's name,
   - the local part of the room's canonical alias, or
   - the complete (local and server part) room's id (case sensitive).
-* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
-  the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
-* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
-  the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.
 
 Defaults to no filtering.
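For illustration (not part of the diff): combining the removed filters as documented, a request such as `GET /_synapse/admin/v1/rooms?public_rooms=false&empty_rooms=true` would return only non-public rooms whose `joined_members` is zero.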
@@ -255,3 +255,13 @@ however extreme care must be taken to avoid database corruption.
 
 Note that the above may fail with an error about duplicate rows if corruption
 has already occurred, and such duplicate rows will need to be manually removed.
+
+### Fixing inconsistent sequences error
+
+Synapse uses Postgres sequences to generate IDs for various tables. A sequence
+and associated table can get out of sync if, for example, Synapse has been
+downgraded and then upgraded again.
+
+To fix the issue shut down Synapse (including any and all workers) and run the
+SQL command included in the error message. Once done Synapse should start
+successfully.
@@ -1759,9 +1759,8 @@ rc_3pid_validation:
 ### `rc_invites`
 
 This option sets ratelimiting how often invites can be sent in a room or to a
-specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
-defaults to `per_second: 0.3`, `burst_count: 10`.
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
 
 Client requests that invite user(s) when [creating a
 room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
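Spelled out as configuration, the defaults quoted above correspond to the following sketch (the values are exactly the documented defaults; the `per_issuer` block is the one described only on the removed side of this hunk):

```yaml
rc_invites:
  per_room:
    per_second: 0.3
    burst_count: 10
  per_user:
    per_second: 0.003
    burst_count: 5
  per_issuer:          # documented only on the removed (-) side above
    per_second: 0.3
    burst_count: 10
```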
@@ -2719,7 +2718,7 @@ Example configuration:
 session_lifetime: 24h
 ```
 ---
-### `refreshable_access_token_lifetime`
+### `refresh_access_token_lifetime`
 
 Time that an access token remains valid for, if the session is using refresh tokens.
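A minimal sketch of this option under the `-` side's spelling (the `10m` value is illustrative and not taken from the diff):

```yaml
# Access tokens stay valid for 10 minutes when refresh tokens are in use
# (illustrative value).
refreshable_access_token_lifetime: 10m
```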
@@ -3807,8 +3806,7 @@ This setting defines options related to the user directory.
 This option has the following sub-options:
 * `enabled`: Defines whether users can search the user directory. If false then
   empty responses are returned to all queries. Defaults to true.
-* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
-  If set to true, will return all users known to the homeserver matching the search query.
+* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
   If false, search results will only contain users
   visible in public rooms and users sharing a room with the requester.
   Defaults to false.
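A sketch of the sub-options being described (values illustrative, not from the diff):

```yaml
user_directory:
  enabled: true           # allow clients to search the directory
  search_all_users: true  # search beyond public/shared rooms
```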
@@ -4152,7 +4150,7 @@ By default, no room is excluded.
 Example configuration:
 ```yaml
 exclude_rooms_from_sync:
-  - "!foo:example.com"
+  - !foo:example.com
 ```
 
 ---
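The quotes being restored on the `-` side matter: a leading unquoted `!` starts a YAML tag, so `- !foo:example.com` is not parsed as the literal room ID string, while `- "!foo:example.com"` is.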
@@ -62,6 +62,6 @@ following documentation:
 
 ## Reporting a security vulnerability
 
-If you've found a security issue in Synapse or any other Element project,
-please report it to us in accordance with our [Security Disclosure
-Policy](https://element.io/security/security-disclosure-policy). Thank you!
+If you've found a security issue in Synapse or any other Matrix.org Foundation
+project, please report it to us in accordance with our [Security Disclosure
+Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
poetry.lock (generated, 266 changed lines)

@@ -35,13 +35,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p
 
 [[package]]
 name = "authlib"
-version = "1.3.1"
+version = "1.3.0"
 description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
 optional = true
 python-versions = ">=3.8"
 files = [
-    {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"},
-    {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"},
+    {file = "Authlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:9637e4de1fb498310a56900b3e2043a206b03cb11c05422014b0302cbc814be3"},
+    {file = "Authlib-1.3.0.tar.gz", hash = "sha256:959ea62a5b7b5123c5059758296122b57cd2585ae2ed1c0622c21b371ffdae06"},
 ]
 
 [package.dependencies]
@@ -1319,103 +1319,103 @@ files = [
 
 [[package]]
 name = "msgpack"
-version = "1.0.8"
+version = "1.0.7"
 description = "MessagePack serializer"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"},
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"},
-    {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"},
-    {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"},
-    {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"},
-    {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"},
-    {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"},
-    {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"},
-    {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"},
-    {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"},
-    {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"},
-    {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"},
-    {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"},
-    {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"},
-    {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"},
-    {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"},
-    {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"},
-    {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"},
-    {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"},
-    {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"},
-    {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"},
-    {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"},
-    {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"},
-    {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"},
-    {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"},
-    {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"},
-    {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"},
-    {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"},
+    {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"},
+    {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"},
+    {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"},
+    {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"},
+    {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"},
+    {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"},
+    {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"},
+    {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"},
+    {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"},
+    {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"},
+    {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"},
+    {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"},
+    {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"},
+    {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"},
+    {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"},
+    {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"},
+    {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"},
+    {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"},
+    {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"},
+    {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"},
+    {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"},
+    {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"},
+    {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"},
+    {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"},
+    {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"},
+    {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"},
+    {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"},
+    {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"},
+    {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"},
+    {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"},
+    {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"},
+    {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"},
+    {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"},
+    {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"},
+    {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"},
+    {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"},
+    {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"},
+    {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"},
+    {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"},
+    {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"},
+    {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"},
+    {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"},
+    {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"},
+    {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"},
+    {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"},
+    {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"},
+    {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"},
+    {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"},
+    {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"},
+    {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"},
+    {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"},
+    {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"},
+    {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"},
+    {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"},
+    {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"},
+    {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"},
 ]
 
 [[package]]
 name = "mypy"
-version = "1.9.0"
+version = "1.8.0"
 description = "Optional static typing for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
-    {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
-    {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
-    {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
-    {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
-    {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
-    {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
-    {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
-    {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
-    {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
-    {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
-    {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
-    {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
-    {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
-    {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
-    {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
-    {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
-    {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
-    {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
-    {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
-    {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
-    {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
-    {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
+    {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"},
+    {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"},
+    {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"},
+    {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"},
+    {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"},
+    {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"},
+    {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"},
+    {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"},
+    {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"},
+    {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"},
+    {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"},
+    {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"},
+    {file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"},
+    {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"},
+    {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"},
+    {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"},
+    {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"},
+    {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"},
+    {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"},
+    {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"},
+    {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"},
+    {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"},
+    {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"},
+    {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"},
+    {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"},
+    {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"},
+    {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"},
 ]
 
 [package.dependencies]
@@ -1461,13 +1461,13 @@ test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
 
 [[package]]
 name = "netaddr"
-version = "1.3.0"
+version = "1.2.1"
 description = "A network address manipulation library for Python"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "netaddr-1.3.0-py3-none-any.whl", hash = "sha256:c2c6a8ebe5554ce33b7d5b3a306b71bbb373e000bbbf2350dd5213cc56e3dbbe"},
-    {file = "netaddr-1.3.0.tar.gz", hash = "sha256:5c3c3d9895b551b763779ba7db7a03487dc1f8e3b385af819af341ae9ef6e48a"},
+    {file = "netaddr-1.2.1-py3-none-any.whl", hash = "sha256:bd9e9534b0d46af328cf64f0e5a23a5a43fca292df221c85580b27394793496e"},
+    {file = "netaddr-1.2.1.tar.gz", hash = "sha256:6eb8fedf0412c6d294d06885c110de945cf4d22d2b510d0404f4e06950857987"},
 ]
 
 [package.extras]
@@ -1488,13 +1488,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
 
 [[package]]
 name = "packaging"
-version = "24.1"
+version = "24.0"
 description = "Core utilities for Python packages"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
 files = [
-    {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
-    {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+    {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"},
+    {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
 ]
 
 [[package]]
@@ -1524,13 +1524,13 @@ files = [
 
 [[package]]
 name = "phonenumbers"
-version = "8.13.39"
+version = "8.13.37"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 optional = false
 python-versions = "*"
 files = [
-    {file = "phonenumbers-8.13.39-py2.py3-none-any.whl", hash = "sha256:3ad2d086fa71e7eef409001b9195ac54bebb0c6e3e752209b558ca192c9229a0"},
-    {file = "phonenumbers-8.13.39.tar.gz", hash = "sha256:db7ca4970d206b2056231105300753b1a5b229f43416f8c2b3010e63fbb68d77"},
+    {file = "phonenumbers-8.13.37-py2.py3-none-any.whl", hash = "sha256:4ea00ef5012422c08c7955c21131e7ae5baa9a3ef52cf2d561e963f023006b80"},
+    {file = "phonenumbers-8.13.37.tar.gz", hash = "sha256:bd315fed159aea0516f7c367231810fe8344d5bec26156b88fa18374c11d1cf2"},
 ]
 
 [[package]]
@@ -2157,13 +2157,13 @@ rpds-py = ">=0.7.0"
 
 [[package]]
 name = "requests"
-version = "2.32.2"
+version = "2.31.0"
 description = "Python HTTP for Humans."
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
 files = [
-    {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"},
-    {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"},
+    {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+    {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
 ]
 
 [package.dependencies]
@@ -2387,13 +2387,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
 
 [[package]]
 name = "sentry-sdk"
-version = "2.6.0"
+version = "2.3.1"
 description = "Python client for Sentry (https://sentry.io)"
 optional = true
 python-versions = ">=3.6"
 files = [
-    {file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"},
-    {file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"},
+    {file = "sentry_sdk-2.3.1-py2.py3-none-any.whl", hash = "sha256:c5aeb095ba226391d337dd42a6f9470d86c9fc236ecc71cfc7cd1942b45010c6"},
+    {file = "sentry_sdk-2.3.1.tar.gz", hash = "sha256:139a71a19f5e9eb5d3623942491ce03cf8ebc14ea2e39ba3e6fe79560d8a5b1f"},
 ]
 
 [package.dependencies]
@@ -2598,22 +2598,22 @@ files = [
 
 [[package]]
 name = "tornado"
-version = "6.4.1"
+version = "6.4"
 description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
 optional = true
-python-versions = ">=3.8"
+python-versions = ">= 3.8"
 files = [
-    {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
-    {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
-    {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
-    {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
-    {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
+    {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"},
+    {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"},
+    {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"},
+    {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"},
+    {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"},
+    {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"},
+    {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"},
+    {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"},
+    {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"},
+    {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"},
+    {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"},
 ]
 
 [[package]]
@@ -2822,13 +2822,13 @@ referencing = "*"
|
||||
|
||||
[[package]]
|
||||
name = "types-netaddr"
|
||||
version = "1.3.0.20240530"
|
||||
version = "1.2.0.20240219"
|
||||
description = "Typing stubs for netaddr"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "types-netaddr-1.3.0.20240530.tar.gz", hash = "sha256:742c2ec1f202b666f544223e2616b34f1f13df80c91e5aeaaa93a72e4d0774ea"},
|
||||
{file = "types_netaddr-1.3.0.20240530-py3-none-any.whl", hash = "sha256:354998d018e326da4f1d9b005fc91137b7c2c473aaf03c4ef64bf83c6861b440"},
|
||||
{file = "types-netaddr-1.2.0.20240219.tar.gz", hash = "sha256:984e70ad838218d3032f37f05a7e294f7b007fe274ec9d774265c8c06698395f"},
|
||||
{file = "types_netaddr-1.2.0.20240219-py3-none-any.whl", hash = "sha256:b26144e878acb8a1a9008e6997863714db04f8029a0f7f6bfe483c977d21b522"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2881,13 +2881,13 @@ types-cffi = "*"
|
||||
|
||||
[[package]]
|
||||
name = "types-pyyaml"
|
||||
version = "6.0.12.20240311"
|
||||
version = "6.0.12.12"
|
||||
description = "Typing stubs for PyYAML"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"},
|
||||
{file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"},
|
||||
{file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"},
|
||||
{file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2917,13 +2917,13 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "typing-extensions"
|
||||
version = "4.12.2"
|
||||
version = "4.11.0"
|
||||
description = "Backported and Experimental Type Hints for Python 3.8+"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
|
||||
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
|
||||
{file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"},
|
||||
{file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2939,18 +2939,18 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.2.2"
|
||||
version = "2.0.7"
|
||||
description = "HTTP library with thread-safe connection pooling, file post, and more."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
|
||||
{file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
|
||||
{file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
|
||||
{file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
|
||||
h2 = ["h2 (>=4,<5)"]
|
||||
secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
|
||||
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
|
||||
zstd = ["zstandard (>=0.18.0)"]
|
||||
|
||||
|
||||
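(Aside: each lock entry above pins exact artifact hashes, so an installer can verify what it downloads. As a minimal illustration, not part of the diff, the requests 2.32.2 wheel hash from the entry above could be enforced with pip's hash-checking mode:

    requests==2.32.2 --hash=sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c

With a requirements line like this, pip refuses to install if the downloaded file's digest differs.)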
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.109.0"
version = "1.109.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -204,8 +204,6 @@ pub struct EventInternalMetadata {
/// The stream ordering of this event. None, until it has been persisted.
#[pyo3(get, set)]
stream_ordering: Option<NonZeroI64>,
#[pyo3(get, set)]
instance_name: Option<String>,

/// whether this event is an outlier (ie, whether we have the state at that
/// point in the DAG)
@@ -234,7 +232,6 @@ impl EventInternalMetadata {
Ok(EventInternalMetadata {
data,
stream_ordering: None,
instance_name: None,
outlier: false,
})
}
@@ -223,6 +223,7 @@ test_packages=(
./tests/msc3930
./tests/msc3902
./tests/msc3967
./tests/msc4115
)

# Enable dirty runs, so tests will reuse the same container where possible.
@@ -70,6 +70,7 @@ def cli() -> None:
pip install -e .[dev]

- A checkout of the sytest repository at ../sytest
- A checkout of the complement repository at ../complement

Then to use:

@@ -112,10 +113,12 @@ def _prepare() -> None:
# Make sure we're in a git repo.
synapse_repo = get_repo_and_check_clean_checkout()
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")

click.secho("Updating Synapse and Sytest git repos...")
synapse_repo.remote().fetch()
sytest_repo.remote().fetch()
complement_repo.remote().fetch()

# Get the current version and AST from root Synapse module.
current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
"Which branch should the release be based on?", default=default
)

for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
for repo_name, repo in {
"synapse": synapse_repo,
"sytest": sytest_repo,
"complement": complement_repo,
}.items():
# Special case for Complement: `develop` maps to `main`
if repo_name == "complement" and branch_name == "develop":
branch_name = "main"

base_branch = find_ref(repo, branch_name)
if not base_branch:
print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
if click.confirm("Push new SyTest branch?", default=True):
sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

# Same for Complement
if click.confirm("Push new Complement branch?", default=True):
complement_repo.git.push(
"-u", complement_repo.remote().name, release_branch_name
)

# Switch to the release branch and ensure it's up to date.
synapse_repo.git.checkout(release_branch_name)
update_branch(synapse_repo)
@@ -630,6 +647,9 @@ def _merge_back() -> None:
else:
# Full release
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
complement_repo = get_repo_and_check_clean_checkout(
"../complement", "complement"
)

if click.confirm(f"Merge {branch_name} → master?", default=True):
_merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +663,9 @@ def _merge_back() -> None:
if click.confirm("On SyTest, merge master → develop?", default=True):
_merge_into(sytest_repo, "master", "develop")

if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
_merge_into(complement_repo, branch_name, "main")


@cli.command()
def announce() -> None:
@@ -52,7 +52,6 @@ def request_registration(
user_type: Optional[str] = None,
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
exists_ok: bool = False,
) -> None:
url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

@@ -98,10 +97,6 @@ def request_registration(
r = requests.post(url, json=data)

if r.status_code != 200:
response = r.json()
if exists_ok and response["errcode"] == "M_USER_IN_USE":
_print("User already exists. Skipping.")
return
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
if 400 <= r.status_code < 500:
try:
@@ -120,7 +115,6 @@ def register_new_user(
shared_secret: str,
admin: Optional[bool],
user_type: Optional[str],
exists_ok: bool = False,
) -> None:
if not user:
try:
@@ -160,13 +154,7 @@ def register_new_user(
admin = False

request_registration(
user,
password,
server_location,
shared_secret,
bool(admin),
user_type,
exists_ok=exists_ok,
user, password, server_location, shared_secret, bool(admin), user_type
)


@@ -186,22 +174,10 @@ def main() -> None:
help="Local part of the new user. Will prompt if omitted.",
)
parser.add_argument(
"--exists-ok",
action="store_true",
help="Do not fail if user already exists.",
)
password_group = parser.add_mutually_exclusive_group()
password_group.add_argument(
"-p",
"--password",
default=None,
help="New password for user. Will prompt for a password if "
"this flag and `--password-file` are both omitted.",
)
password_group.add_argument(
"--password-file",
default=None,
help="File containing the new password for user. If set, will override `--password`.",
help="New password for user. Will prompt if omitted.",
)
parser.add_argument(
"-t",
@@ -209,7 +185,6 @@ def main() -> None:
default=None,
help="User type as specified in synapse.api.constants.UserTypes",
)

admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
"-a",
@@ -272,11 +247,6 @@ def main() -> None:
print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)

if args.password_file:
password = _read_file(args.password_file, "password-file").strip()
else:
password = args.password

if args.server_url:
server_url = args.server_url
elif config is not None:
@@ -300,13 +270,7 @@ def main() -> None:
admin = args.admin

register_new_user(
args.user,
password,
server_url,
secret,
admin,
args.user_type,
exists_ok=args.exists_ok,
args.user, args.password, server_url, secret, admin, args.user_type
)
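For orientation, the variant of this script that includes the two flags would be invoked roughly as follows (hypothetical values; the server URL and shared secret depend on your deployment):

    register_new_matrix_user -u alice --password-file /etc/synapse/alice.pass --exists-ok -k <shared_secret> http://localhost:8008

`--password-file` reads the password from disk instead of prompting, and `--exists-ok` turns an M_USER_IN_USE response (e.g. `{"errcode": "M_USER_IN_USE", "error": "User ID already taken."}`) into a logged no-op rather than a failure, which makes the command safe to re-run from provisioning scripts.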
@@ -238,7 +238,7 @@ class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""

# Requesting user's membership, per MSC4115
MEMBERSHIP: Final = "membership"
MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"


class RoomTypes:
@@ -393,6 +393,9 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)

# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)

# MSC3861: Matrix architecture change to delegate authentication via OIDC
try:
self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -433,8 +436,8 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)

self.msc3823_account_suspension = experimental.get(
"msc3823_account_suspension", False
self.msc4115_membership_on_events = experimental.get(
"msc4115_membership_on_events", False
)

self.msc3916_authenticated_media_enabled = experimental.get(
@@ -443,6 +446,3 @@ class ExperimentalConfig(Config):

# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)

# MSC4156: Migrate server_name to via
self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)
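All of the `msc4115_membership_on_events` plumbing in the handler changes below reads this flag from the experimental config block. A minimal homeserver.yaml sketch to enable it (assuming the standard experimental_features section) would be:

    experimental_features:
      msc4115_membership_on_events: true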
@@ -90,7 +90,6 @@ def prune_event(event: EventBase) -> EventBase:
pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

# Mark the event as redacted
@@ -117,7 +116,6 @@ def clone_event(event: EventBase) -> EventBase:
new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
new_event.internal_metadata.outlier = event.internal_metadata.outlier

return new_event
@@ -19,6 +19,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
import inspect
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type

@@ -323,6 +324,21 @@ def register_servlets(
):
continue

# don't load the endpoint if the storage provider is incompatible
media_repo = hs.get_media_repository()
load_download_endpoint = True
for provider in media_repo.media_storage.storage_providers:
signature = inspect.signature(provider.backend.fetch)
if "federation" not in signature.parameters:
logger.warning(
f"Federation media `/download` endpoint will not be enabled as storage provider {provider.backend} is not compatible with this endpoint."
)
load_download_endpoint = False
break

if not load_download_endpoint:
continue

servletclass(
hs=hs,
authenticator=authenticator,
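The inspect.signature guard above only registers the endpoint when every storage provider's fetch accepts a federation argument. A standalone sketch of the same check, using a toy function for illustration only:

    import inspect

    async def fetch(path, file_info, media_info=None, federation=False): ...

    compatible = "federation" in inspect.signature(fetch).parameters  # True here

A provider still written against the old two-argument fetch would fail this test, so the `/download` servlet is skipped rather than wired up against an incompatible backend.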
@@ -793,7 +793,7 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
"""
Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
a multipart/mixed response consisting of a JSON object and the requested media
a multipart/form-data response consisting of a JSON object and the requested media
item. This endpoint only returns local media.
"""
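Concretely, a multipart payload of the kind this servlet returns carries a JSON part followed by the media part, along the lines of the sketch below (boundary value and media type are illustrative, following MSC3916's draft shape):

    Content-Type: multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p

    --gc0p4Jq0M2Yt08jU534c0p
    Content-Type: application/json

    {}

    --gc0p4Jq0M2Yt08jU534c0p
    Content-Type: image/png

    <media bytes>
    --gc0p4Jq0M2Yt08jU534c0p--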
@@ -42,6 +42,7 @@ class AdminHandler:
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled

async def get_whois(self, user: UserID) -> JsonMapping:
@@ -214,6 +215,7 @@ class AdminHandler:
self._storage_controllers,
user_id,
events,
msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
)

writer.write_events(room_id, events)
@@ -148,6 +148,7 @@ class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._config = hs.config

async def get_event(
self,
@@ -193,6 +194,7 @@ class EventHandler:
user.to_string(),
[event],
is_peeking=is_peeking,
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

if not filtered:
@@ -224,6 +224,7 @@ class InitialSyncHandler:
self._storage_controllers,
user_id,
messages,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -382,6 +383,7 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -496,6 +498,7 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -201,7 +201,7 @@ class MessageHandler:

if at_token:
last_event_id = (
await self.store.get_last_event_id_in_room_before_stream_ordering(
await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=at_token.room_key,
)
@@ -642,17 +642,6 @@ class EventCreationHandler:
"""
await self.auth_blocking.check_auth_blocking(requester=requester)

if event_dict["type"] == EventTypes.Message:
requester_suspended = await self.store.get_user_suspended_status(
requester.user.to_string()
)
if requester_suspended:
raise SynapseError(
403,
"Sending messages while account is suspended is not allowed.",
Codes.USER_ACCOUNT_SUSPENDED,
)

if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"]
maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
@@ -1562,7 +1551,6 @@ class EventCreationHandler:
# stream_ordering entry manually (as it was persisted on
# another worker).
event.internal_metadata.stream_ordering = stream_id
event.internal_metadata.instance_name = writer_instance

return event
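For reference, a client tripping the suspension check shown above would receive a 403 carrying the message from the diff; a sketched response body follows. The exact errcode string comes from Codes.USER_ACCOUNT_SUSPENDED and is assumed here to be the MSC3823 identifier:

    {
      "errcode": "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED",
      "error": "Sending messages while account is suspended is not allowed."
    }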
@@ -623,6 +623,7 @@ class PaginationHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

# if after the filter applied there are no more events
@@ -95,6 +95,7 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler()
self._config = hs.config

async def get_relations(
self,
@@ -163,6 +164,7 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

# The relations returned for the requested event do include their
@@ -608,6 +610,7 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

aggregations = await self.get_bundled_aggregations(
@@ -1476,6 +1476,7 @@ class RoomContextHandler:
user.to_string(),
events,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

event = await self.store.get_event(
@@ -483,6 +483,7 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

events.sort(key=lambda e: -rank_map[e.event_id])
@@ -584,6 +585,7 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

room_events.extend(events)
@@ -671,12 +673,14 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
res.events_before,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

events_after = await filter_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_after,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

context: JsonDict = {
@@ -18,22 +18,14 @@
#
#
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional

from immutabledict import immutabledict

from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.api.constants import Membership
from synapse.events import EventBase
from synapse.storage.roommember import RoomsForUser
from synapse.types import (
PersistedEventPosition,
Requester,
RoomStreamToken,
StreamToken,
UserID,
)
from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
from synapse.types.state import StateFilter

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -41,27 +33,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


def convert_event_to_rooms_for_user(event: EventBase) -> RoomsForUser:
"""
Quick helper to convert an event to a `RoomsForUser` object.
"""
# These fields should be present for all persisted events
assert event.internal_metadata.stream_ordering is not None
assert event.internal_metadata.instance_name is not None

return RoomsForUser(
room_id=event.room_id,
sender=event.sender,
membership=event.membership,
event_id=event.event_id,
event_pos=PersistedEventPosition(
event.internal_metadata.instance_name,
event.internal_metadata.stream_ordering,
),
room_version_id=event.room_version.identifier,
)


def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> bool:
"""
Returns True if the membership event should be included in the sync response,
@@ -86,7 +57,6 @@ class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers()
self.auth_blocking = hs.get_auth_blocking()
self.notifier = hs.get_notifier()
self.event_sources = hs.get_event_sources()
@@ -99,19 +69,9 @@ class SlidingSyncHandler:
from_token: Optional[StreamToken] = None,
timeout_ms: int = 0,
) -> SlidingSyncResult:
"""
|
||||
Get the sync for a client if we have new data for it now. Otherwise
|
||||
"""Get the sync for a client if we have new data for it now. Otherwise
|
||||
wait for new data to arrive on the server. If the timeout expires, then
|
||||
return an empty sync result.
|
||||
|
||||
Args:
|
||||
requester: The user making the request
|
||||
sync_config: Sync configuration
|
||||
from_token: The point in the stream to sync from. Token of the end of the
|
||||
previous batch. May be `None` if this is the initial sync request.
|
||||
timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
|
||||
we will immediately but there might not be any new data so we just return an
|
||||
empty response.
|
||||
"""
|
||||
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@@ -183,14 +143,6 @@ class SlidingSyncHandler:
"""
Generates the response body of a Sliding Sync result, represented as a
`SlidingSyncResult`.

We fetch data according to the token range (> `from_token` and <= `to_token`).

Args:
sync_config: Sync configuration
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request.
"""
user_id = sync_config.user.to_string()
app_service = self.store.get_app_service_by_user_id(user_id)
@@ -199,28 +151,25 @@ class SlidingSyncHandler:
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()

# Get all of the room IDs that the user should be able to see in the sync
# response
room_id_set = await self.get_sync_room_ids_for_user(
sync_config.user,
from_token=from_token,
to_token=to_token,
)

# Assemble sliding window lists
lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
if sync_config.lists:
# Get all of the room IDs that the user should be able to see in the sync
# response
sync_room_map = await self.get_sync_room_ids_for_user(
sync_config.user,
from_token=from_token,
to_token=to_token,
)

for list_key, list_config in sync_config.lists.items():
# Apply filters
filtered_sync_room_map = sync_room_map
if list_config.filters is not None:
filtered_sync_room_map = await self.filter_rooms(
sync_config.user, sync_room_map, list_config.filters, to_token
)

sorted_room_info = await self.sort_rooms(
filtered_sync_room_map, to_token
)
# TODO: Apply filters
#
# TODO: Exclude partially stated rooms unless the `required_state` has
# `["m.room.member", "$LAZY"]`
filtered_room_ids = room_id_set
# TODO: Apply sorts
sorted_room_ids = sorted(filtered_room_ids)

ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
if list_config.ranges:
@@ -229,17 +178,12 @@ class SlidingSyncHandler:
SlidingSyncResult.SlidingWindowList.Operation(
op=OperationType.SYNC,
range=range,
room_ids=[
room_id
for room_id, _ in sorted_room_info[
range[0] : range[1]
]
],
room_ids=sorted_room_ids[range[0] : range[1]],
)
)

lists[list_key] = SlidingSyncResult.SlidingWindowList(
count=len(sorted_room_info),
count=len(sorted_room_ids),
ops=ops,
)

@@ -256,7 +200,7 @@ class SlidingSyncHandler:
user: UserID,
to_token: StreamToken,
from_token: Optional[StreamToken] = None,
) -> Dict[str, RoomsForUser]:
) -> AbstractSet[str]:
"""
Fetch room IDs that should be listed for this user in the sync response (the
full room list that will be filtered, sorted, and sliced).
@@ -273,15 +217,6 @@ class SlidingSyncHandler:
`forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
to tell when a room was forgotten at the moment so we can't factor it into the
from/to range.

Args:
user: User to fetch rooms for
to_token: The token to fetch rooms up to.
from_token: The point in the stream to sync from.

Returns:
A dictionary of room IDs that should be listed in the sync response along
with membership information in that room at the time of `to_token`.
"""
user_id = user.to_string()

@@ -301,11 +236,11 @@ class SlidingSyncHandler:

# If the user has never joined any rooms before, we can just return an empty list
if not room_for_user_list:
return {}
return set()

# Our working list of rooms that can show up in the sync response
sync_room_id_set = {
room_for_user.room_id: room_for_user
room_for_user.room_id
for room_for_user in room_for_user_list
if filter_membership_for_sync(
membership=room_for_user.membership,
@@ -340,6 +275,12 @@ class SlidingSyncHandler:
instance_map=immutabledict(instance_to_max_stream_ordering_map),
)

# If our `to_token` is already the same or ahead of the latest room membership
# for the user, we can just straight-up return the room list (nothing has
# changed)
if membership_snapshot_token.is_before_or_eq(to_token.room_key):
return sync_room_id_set

# Since we fetched the users room list at some point in time after the from/to
# tokens, we need to revert/rewind some membership changes to match the point in
# time of the `to_token`. In particular, we need to make these fixups:
@@ -359,20 +300,14 @@ class SlidingSyncHandler:

# 1) Fetch membership changes that fall in the range from `to_token` up to
# `membership_snapshot_token`
#
# If our `to_token` is already the same or ahead of the latest room membership
# for the user, we don't need to do any "2)" fix-ups and can just straight-up
# use the room list from the snapshot as a base (nothing has changed)
membership_change_events_after_to_token = []
if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
membership_change_events_after_to_token = (
await self.store.get_membership_changes_for_user(
user_id,
from_key=to_token.room_key,
to_key=membership_snapshot_token,
excluded_rooms=self.rooms_to_exclude_globally,
)
membership_change_events_after_to_token = (
await self.store.get_membership_changes_for_user(
user_id,
from_key=to_token.room_key,
to_key=membership_snapshot_token,
excluded_rooms=self.rooms_to_exclude_globally,
)
)

# 1) Assemble a list of the last membership events in some given ranges. Someone
# could have left and joined multiple times during the given range but we only
@@ -455,9 +390,7 @@ class SlidingSyncHandler:
not was_last_membership_already_included
and should_prev_membership_be_included
):
sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
last_membership_change_after_to_token
)
sync_room_id_set.add(room_id)
# 1b) Remove rooms that the user joined (hasn't left) after the `to_token`
#
# For example, if the last membership event after the `to_token` is a "join"
@@ -468,7 +401,7 @@ class SlidingSyncHandler:
was_last_membership_already_included
and not should_prev_membership_be_included
):
del sync_room_id_set[room_id]
sync_room_id_set.discard(room_id)

# 2) -----------------------------------------------------
# We fix-up newly_left rooms after the first fixup because it may have removed
@@ -503,178 +436,6 @@ class SlidingSyncHandler:
# include newly_left rooms because the last event that the user should see
# is their own leave event
if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
sync_room_id_set[room_id] = convert_event_to_rooms_for_user(
last_membership_change_in_from_to_range
)
sync_room_id_set.add(room_id)

return sync_room_id_set

async def filter_rooms(
self,
user: UserID,
sync_room_map: Dict[str, RoomsForUser],
filters: SlidingSyncConfig.SlidingSyncList.Filters,
to_token: StreamToken,
) -> Dict[str, RoomsForUser]:
"""
Filter rooms based on the sync request.

Args:
user: User to filter rooms for
sync_room_map: Dictionary of room IDs to sort along with membership
information in the room at the time of `to_token`.
filters: Filters to apply
to_token: We filter based on the state of the room at this token

Returns:
A filtered dictionary of room IDs along with membership information in the
room at the time of `to_token`.
"""
user_id = user.to_string()

# TODO: Apply filters
#
# TODO: Exclude partially stated rooms unless the `required_state` has
# `["m.room.member", "$LAZY"]`

filtered_room_id_set = set(sync_room_map.keys())

# Filter for Direct-Message (DM) rooms
if filters.is_dm is not None:
# We're using global account data (`m.direct`) instead of checking for
# `is_direct` on membership events because that property only appears for
# the invitee membership event (doesn't show up for the inviter). Account
# data is set by the client so it needs to be scrutinized.
#
# We're unable to take `to_token` into account for global account data since
# we only keep track of the latest account data for the user.
dm_map = await self.store.get_global_account_data_by_type_for_user(
user_id, AccountDataTypes.DIRECT
)

# Flatten out the map
dm_room_id_set = set()
if isinstance(dm_map, dict):
for room_ids in dm_map.values():
# Account data should be a list of room IDs. Ignore anything else
if isinstance(room_ids, list):
for room_id in room_ids:
if isinstance(room_id, str):
dm_room_id_set.add(room_id)

if filters.is_dm:
# Only DM rooms please
filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set)
else:
# Only non-DM rooms please
filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set)

if filters.spaces:
raise NotImplementedError()

# Filter for encrypted rooms
if filters.is_encrypted is not None:
# Make a copy so we don't run into an error: `Set changed size during
# iteration`, when we filter out and remove items
for room_id in list(filtered_room_id_set):
state_at_to_token = await self.storage_controllers.state.get_state_at(
room_id,
to_token,
state_filter=StateFilter.from_types(
[(EventTypes.RoomEncryption, "")]
),
)
is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, ""))

# If we're looking for encrypted rooms, filter out rooms that are not
# encrypted and vice versa
if (filters.is_encrypted and not is_encrypted) or (
not filters.is_encrypted and is_encrypted
):
filtered_room_id_set.remove(room_id)

# Filter for rooms that the user has been invited to
if filters.is_invite is not None:
# Make a copy so we don't run into an error: `Set changed size during
# iteration`, when we filter out and remove items
for room_id in list(filtered_room_id_set):
room_for_user = sync_room_map[room_id]
# If we're looking for invite rooms, filter out rooms that the user is
# not invited to and vice versa
if (
filters.is_invite and room_for_user.membership != Membership.INVITE
) or (
not filters.is_invite
and room_for_user.membership == Membership.INVITE
):
filtered_room_id_set.remove(room_id)

if filters.room_types:
raise NotImplementedError()

if filters.not_room_types:
raise NotImplementedError()

if filters.room_name_like:
raise NotImplementedError()

if filters.tags:
raise NotImplementedError()

if filters.not_tags:
raise NotImplementedError()

# Assemble a new sync room map but only with the `filtered_room_id_set`
return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set}

async def sort_rooms(
self,
sync_room_map: Dict[str, RoomsForUser],
to_token: StreamToken,
) -> List[Tuple[str, RoomsForUser]]:
"""
Sort by `stream_ordering` of the last event that the user should see in the
room. `stream_ordering` is unique so we get a stable sort.

Args:
sync_room_map: Dictionary of room IDs to sort along with membership
information in the room at the time of `to_token`.
to_token: We sort based on the events in the room at this token (<= `to_token`)

Returns:
A sorted list of room IDs by `stream_ordering` along with membership information.
"""

# Assemble a map of room ID to the `stream_ordering` of the last activity that the
# user should see in the room (<= `to_token`)
last_activity_in_room_map: Dict[str, int] = {}
for room_id, room_for_user in sync_room_map.items():
# If they are fully-joined to the room, let's find the latest activity
# at/before the `to_token`.
if room_for_user.membership == Membership.JOIN:
last_event_result = (
await self.store.get_last_event_pos_in_room_before_stream_ordering(
room_id, to_token.room_key
)
)

# If the room has no events at/before the `to_token`, this is probably a
# mistake in the code that generates the `sync_room_map` since that should
# only give us rooms that the user had membership in during the token range.
assert last_event_result is not None

_, event_pos = last_event_result

last_activity_in_room_map[room_id] = event_pos.stream
else:
# Otherwise, if the user has left/been invited/knocked/been banned from
# a room, they shouldn't see anything past that point.
last_activity_in_room_map[room_id] = room_for_user.event_pos.stream

return sorted(
sync_room_map.items(),
# Sort by the last activity (stream_ordering) in the room
key=lambda room_info: last_activity_in_room_map[room_info[0]],
# We want descending order
reverse=True,
)
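Two data shapes in the handler above are worth pinning down. The `m.direct` account data consulted by the is_dm filter maps partner user IDs to lists of room IDs, for example (illustrative values):

    {
      "@bob:example.org": ["!abc123:example.org"],
      "@carol:example.org": ["!def456:example.org", "!ghi789:example.org"]
    }

which is why the code flattens the values into a single dm_room_id_set before intersecting or differencing. And sort_rooms orders rooms by last activity, newest first; a toy equivalent of its final sorted(...) call:

    last_activity = {"!a": 10, "!b": 25, "!c": 17}
    sorted(last_activity.items(), key=lambda kv: kv[1], reverse=True)
    # [("!b", 25), ("!c", 17), ("!a", 10)]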
@@ -844,6 +844,7 @@ class SyncHandler:
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
log_kv({"recents_after_visibility_filtering": len(recents)})
else:
@@ -929,6 +930,7 @@ class SyncHandler:
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)

loaded_recents = []
@@ -979,6 +981,89 @@ class SyncHandler:
bundled_aggregations=bundled_aggregations,
)

async def get_state_after_event(
self,
event_id: str,
state_filter: Optional[StateFilter] = None,
await_full_state: bool = True,
) -> StateMap[str]:
"""
Get the room state after the given event

Args:
event_id: event of interest
state_filter: The state filter used to fetch state from the database.
await_full_state: if `True`, will block if we do not yet have complete state
at the event and `state_filter` is not satisfied by partial state.
Defaults to `True`.
"""
state_ids = await self._state_storage_controller.get_state_ids_for_event(
event_id,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)

# using get_metadata_for_events here (instead of get_event) sidesteps an issue
# with redactions: if `event_id` is a redaction event, and we don't have the
# original (possibly because it got purged), get_event will refuse to return
# the redaction event, which isn't terribly helpful here.
#
# (To be fair, in that case we could assume it's *not* a state event, and
# therefore we don't need to worry about it. But still, it seems cleaner just
# to pull the metadata.)
m = (await self.store.get_metadata_for_events([event_id]))[event_id]
if m.state_key is not None and m.rejection_reason is None:
state_ids = dict(state_ids)
state_ids[(m.event_type, m.state_key)] = event_id

return state_ids

async def get_state_at(
self,
room_id: str,
stream_position: StreamToken,
state_filter: Optional[StateFilter] = None,
await_full_state: bool = True,
) -> StateMap[str]:
"""Get the room state at a particular stream position

Args:
room_id: room for which to get state
stream_position: point at which to get state
state_filter: The state filter used to fetch state from the database.
await_full_state: if `True`, will block if we do not yet have complete state
at the last event in the room before `stream_position` and
`state_filter` is not satisfied by partial state. Defaults to `True`.
"""
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=stream_position.room_key,
)

if last_event_id:
state = await self.get_state_after_event(
last_event_id,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)

else:
# no events in this room - so presumably no state
state = {}

# (erikj) This should be rarely hit, but we've had some reports that
# we get more state down gappy syncs than we should, so let's add
# some logging.
logger.info(
"Failed to find any events in room %s at %s",
room_id,
stream_position.room_key,
)
return state

async def compute_summary(
self,
room_id: str,
@@ -1352,7 +1437,7 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False

state_at_timeline_end = await self._state_storage_controller.get_state_at(
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -1436,7 +1521,7 @@ class SyncHandler:
# We need to make sure the first event in our batch points to the
# last event in the previous batch.
last_event_id_prev_batch = (
await self.store.get_last_event_id_in_room_before_stream_ordering(
await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=since_token.room_key,
)
@@ -1480,7 +1565,7 @@ class SyncHandler:
else:
# We can get here if the user has ignored the senders of all
# the recent events.
state_at_timeline_start = await self._state_storage_controller.get_state_at(
state_at_timeline_start = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -1502,14 +1587,14 @@ class SyncHandler:
# about them).
state_filter = StateFilter.all()

state_at_previous_sync = await self._state_storage_controller.get_state_at(
state_at_previous_sync = await self.get_state_at(
room_id,
stream_position=since_token,
state_filter=state_filter,
await_full_state=await_full_state,
)

state_at_timeline_end = await self._state_storage_controller.get_state_at(
state_at_timeline_end = await self.get_state_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
@@ -2508,7 +2593,7 @@ class SyncHandler:
continue

if room_id in sync_result_builder.joined_room_ids or has_join:
old_state_ids = await self._state_storage_controller.get_state_at(
old_state_ids = await self.get_state_at(
room_id,
since_token,
state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2538,14 +2623,12 @@ class SyncHandler:
newly_left_rooms.append(room_id)
else:
if not old_state_ids:
old_state_ids = (
await self._state_storage_controller.get_state_at(
room_id,
since_token,
state_filter=StateFilter.from_types(
[(EventTypes.Member, user_id)]
),
)
old_state_ids = await self.get_state_at(
room_id,
since_token,
state_filter=StateFilter.from_types(
[(EventTypes.Member, user_id)]
),
)
old_mem_ev_id = old_state_ids.get(
(EventTypes.Member, user_id), None
@@ -119,15 +119,14 @@ def parse_integer(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
negative: whether to allow negative integers, defaults to False (disallowing
negatives).
negative: whether to allow negative integers, defaults to True.
Returns:
An int value or the default.

Raises:
SynapseError: if the parameter is absent and required, if the
parameter is present and not an integer, or if the
parameter is illegitimately negative.
parameter is illegitimate negative.
"""
args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
return parse_integer_from_args(args, name, default, required, negative)
@@ -165,7 +164,7 @@ def parse_integer_from_args(
name: str,
default: Optional[int] = None,
required: bool = False,
negative: bool = False,
negative: bool = True,
) -> Optional[int]:
"""Parse an integer parameter from the request string

@@ -175,8 +174,7 @@ def parse_integer_from_args(
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
negative: whether to allow negative integers, defaults to False (disallowing
negatives).
negative: whether to allow negative integers, defaults to True.

Returns:
An int value or the default.
@@ -184,7 +182,7 @@ def parse_integer_from_args(
Raises:
SynapseError: if the parameter is absent and required, if the
parameter is present and not an integer, or if the
parameter is illegitimately negative.
parameter is illegitimate negative.
"""
name_bytes = name.encode("ascii")
|
||||
from synapse.http.server import finish_request, respond_with_json
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
from synapse.util import Clock
|
||||
from synapse.util.stringutils import is_ascii
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.media_storage import MultipartResponder
|
||||
from synapse.storage.databases.main.media_repository import LocalMedia
|
||||
|
||||
|
||||
@@ -275,19 +275,16 @@ def _can_encode_filename_as_token(x: str) -> bool:
|
||||
|
||||
|
||||
async def respond_with_multipart_responder(
|
||||
clock: Clock,
|
||||
request: SynapseRequest,
|
||||
responder: "Optional[Responder]",
|
||||
responder: "Optional[MultipartResponder]",
|
||||
media_info: "LocalMedia",
|
||||
) -> None:
|
||||
"""
|
||||
Responds to requests originating from the federation media `/download` endpoint by
|
||||
streaming a multipart/mixed response
|
||||
Responds via a Multipart responder for the federation media `/download` requests
|
||||
|
||||
Args:
|
||||
clock:
|
||||
request: the federation request to respond to
|
||||
responder: the responder which will send the response
|
||||
responder: the Multipart responder which will send the response
|
||||
media_info: metadata about the media item
|
||||
"""
|
||||
if not responder:
|
||||
@@ -302,27 +299,15 @@ async def respond_with_multipart_responder(
|
||||
)
|
||||
return
|
||||
|
||||
from synapse.media.media_storage import MultipartFileConsumer
|
||||
|
||||
# note that currently the json_object is just {}, this will change when linked media
|
||||
# is implemented
|
||||
multipart_consumer = MultipartFileConsumer(
|
||||
clock, request, media_info.media_type, {}, media_info.media_length
|
||||
)
|
||||
|
||||
logger.debug("Responding to media request with responder %s", responder)
|
||||
if media_info.media_length is not None:
|
||||
content_length = multipart_consumer.content_length()
|
||||
assert content_length is not None
|
||||
request.setHeader(b"Content-Length", b"%d" % (content_length,))
|
||||
|
||||
request.setHeader(b"Content-Length", b"%d" % (media_info.media_length,))
|
||||
request.setHeader(
|
||||
b"Content-Type",
|
||||
b"multipart/mixed; boundary=%s" % multipart_consumer.boundary,
|
||||
b"Content-Type", b"multipart/mixed; boundary=%s" % responder.boundary
|
||||
)
|
||||
|
||||
try:
|
||||
await responder.write_to_consumer(multipart_consumer)
|
||||
await responder.write_to_consumer(request)
|
||||
except Exception as e:
|
||||
# The majority of the time this will be due to the client having gone
|
||||
# away. Unfortunately, Twisted simply throws a generic exception at us
|
||||
|
||||
@@ -58,7 +58,7 @@ from synapse.media._base import (
respond_with_responder,
)
from synapse.media.filepath import MediaFilePaths
from synapse.media.media_storage import MediaStorage
from synapse.media.media_storage import MediaStorage, MultipartResponder
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer
@@ -462,11 +462,13 @@ class MediaRepository:

file_info = FileInfo(None, media_id, url_cache=bool(url_cache))

responder = await self.media_storage.fetch_media(file_info)
responder = await self.media_storage.fetch_media(
file_info, media_info, federation
)
if federation:
await respond_with_multipart_responder(
self.clock, request, responder, media_info
)
# this really should be a Multipart responder but just in case
assert isinstance(responder, MultipartResponder)
await respond_with_multipart_responder(request, responder, media_info)
else:
await respond_with_responder(
request, responder, media_type, media_length, upload_name
@@ -39,34 +39,30 @@ from typing import (
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
from uuid import uuid4
|
||||
|
||||
import attr
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet import interfaces
|
||||
from twisted.internet import defer, interfaces
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.interfaces import IConsumer
|
||||
from twisted.protocols.basic import FileSender
|
||||
|
||||
from synapse.api.errors import NotFoundError
|
||||
from synapse.logging.context import (
|
||||
defer_to_thread,
|
||||
make_deferred_yieldable,
|
||||
run_in_background,
|
||||
)
|
||||
from synapse.logging.context import defer_to_thread, make_deferred_yieldable
|
||||
from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
|
||||
from synapse.util import Clock
|
||||
from synapse.util.file_consumer import BackgroundFileConsumer
|
||||
|
||||
from ..storage.databases.main.media_repository import LocalMedia
|
||||
from ..types import JsonDict
|
||||
from ._base import FileInfo, Responder
|
||||
from .filepath import MediaFilePaths
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.storage_provider import StorageProvider
|
||||
from synapse.media.storage_provider import StorageProviderWrapper
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -89,7 +85,7 @@ class MediaStorage:
|
||||
hs: "HomeServer",
|
||||
local_media_directory: str,
|
||||
filepaths: MediaFilePaths,
|
||||
storage_providers: Sequence["StorageProvider"],
|
||||
storage_providers: Sequence["StorageProviderWrapper"],
|
||||
):
|
||||
self.hs = hs
|
||||
self.reactor = hs.get_reactor()
|
||||
@@ -185,15 +181,23 @@ class MediaStorage:
|
||||
|
||||
raise e from None
|
||||
|
||||
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
|
||||
async def fetch_media(
|
||||
self,
|
||||
file_info: FileInfo,
|
||||
media_info: Optional[LocalMedia] = None,
|
||||
federation: bool = False,
|
||||
) -> Optional[Responder]:
|
||||
"""Attempts to fetch media described by file_info from the local cache
|
||||
and configured storage providers.
|
||||
|
||||
Args:
|
||||
file_info: Metadata about the media file
|
||||
media_info: Metadata about the media item
|
||||
federation: Whether this file is being fetched for a federation request
|
||||
|
||||
Returns:
|
||||
Returns a Responder if the file was found, otherwise None.
|
||||
If the file was found returns a Responder (a Multipart Responder if the requested
|
||||
file is for the federation /download endpoint), otherwise None.
|
||||
"""
|
||||
paths = [self._file_info_to_path(file_info)]
|
||||
|
||||
@@ -213,12 +217,19 @@ class MediaStorage:
|
||||
local_path = os.path.join(self.local_media_directory, path)
|
||||
if os.path.exists(local_path):
|
||||
logger.debug("responding with local file %s", local_path)
|
||||
return FileResponder(open(local_path, "rb"))
|
||||
if federation:
|
||||
assert media_info is not None
|
||||
boundary = uuid4().hex.encode("ascii")
|
||||
return MultipartResponder(
|
||||
open(local_path, "rb"), media_info, boundary
|
||||
)
|
||||
else:
|
||||
return FileResponder(open(local_path, "rb"))
|
||||
logger.debug("local file %s did not exist", local_path)
|
||||
|
||||
for provider in self.storage_providers:
|
||||
for path in paths:
|
||||
res: Any = await provider.fetch(path, file_info)
|
||||
res: Any = await provider.fetch(path, file_info, media_info, federation)
|
||||
if res:
|
||||
logger.debug("Streaming %s from %s", path, provider)
|
||||
return res
|
||||
@@ -353,6 +364,38 @@ class FileResponder(Responder):
|
||||
self.open_file.close()
|
||||
|
||||
|
||||
class MultipartResponder(Responder):
|
||||
"""Wraps an open file, formats the response according to MSC3916 and sends it to a
|
||||
federation request.
|
||||
|
||||
Args:
|
||||
open_file: A file like object to be streamed to the client,
|
||||
is closed when finished streaming.
|
||||
media_info: metadata about the media item
|
||||
boundary: bytes to use for the multipart response boundary
|
||||
"""
|
||||
|
||||
def __init__(self, open_file: IO, media_info: LocalMedia, boundary: bytes) -> None:
|
||||
self.open_file = open_file
|
||||
self.media_info = media_info
|
||||
self.boundary = boundary
|
||||
|
||||
def write_to_consumer(self, consumer: IConsumer) -> Deferred:
|
||||
return make_deferred_yieldable(
|
||||
MultipartFileSender().beginFileTransfer(
|
||||
self.open_file, consumer, self.media_info.media_type, {}, self.boundary
|
||||
)
|
||||
)
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[Type[BaseException]],
|
||||
exc_val: Optional[BaseException],
|
||||
exc_tb: Optional[TracebackType],
|
||||
) -> None:
|
||||
self.open_file.close()
|
||||
|
||||
|
||||
class SpamMediaException(NotFoundError):
|
||||
"""The media was blocked by a spam checker, so we simply 404 the request (in
|
||||
the same way as if it was quarantined).
|
||||
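For orientation, a minimal, self-contained sketch (not Synapse code) of the MSC3916-style body that MultipartResponder produces via the sender below: a JSON field first, then the requested file, separated by a boundary. The boundary, JSON payload, media type, and exact framing here are illustrative placeholders.

# Minimal sketch of the multipart body assembled by the code in this diff.
# Boundary, JSON payload and media type are illustrative placeholders.
import json

CRLF = b"\r\n"
boundary = b"0123456789abcdef0123456789abcdef"  # uuid4().hex in the real code
json_bytes = json.dumps({}).encode("utf-8")
file_bytes = b"<media bytes>"

body = (
    CRLF + b"--" + boundary + CRLF
    + b"Content-Type: application/json" + CRLF + CRLF
    + json_bytes
    + CRLF + b"--" + boundary + CRLF
    + b"Content-Type: image/png" + CRLF + CRLF
    + file_bytes
    + CRLF + b"--" + boundary + b"--" + CRLF
)
print(len(body))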
@@ -388,194 +431,105 @@ class ReadableFileWrapper:
            await self.clock.sleep(0)


@implementer(interfaces.IConsumer)
@implementer(interfaces.IPushProducer)
class MultipartFileConsumer:
    """Wraps a given consumer so that any data that gets written to it gets
    converted to a multipart format.
@implementer(interfaces.IProducer)
class MultipartFileSender:
    """
    A producer that sends the contents of a file to a federation request in the format
    outlined in MSC3916 - a multipart/form-data response where the first field is a
    JSON object and the second is the requested file.

    This is a slight re-writing of twisted.protocols.basic.FileSender to achieve the format
    outlined above.
    """

    def __init__(
    CHUNK_SIZE = 2**14

    lastSent = ""
    deferred: Optional[defer.Deferred] = None

    def beginFileTransfer(
        self,
        clock: Clock,
        wrapped_consumer: interfaces.IConsumer,
        file: IO,
        consumer: IConsumer,
        file_content_type: str,
        json_object: JsonDict,
        content_length: Optional[int] = None,
    ) -> None:
        self.clock = clock
        self.wrapped_consumer = wrapped_consumer
        boundary: bytes,
    ) -> Deferred:
        """
        Begin transferring a file

        Args:
            file: The file object to read data from
            consumer: The synapse request to write the data to
            file_content_type: The content-type of the file
            json_object: The JSON object to write to the first field of the response
            boundary: bytes to be used as the multipart/form-data boundary

        Returns: A deferred whose callback will be invoked when the file has
            been completely written to the consumer. The last byte written to the
            consumer is passed to the callback.
        """
        self.file: Optional[IO] = file
        self.consumer = consumer
        self.json_field = json_object
        self.json_field_written = False
        self.content_type_written = False
        self.file_content_type = file_content_type
        self.boundary = uuid4().hex.encode("ascii")
        self.boundary = boundary
        self.deferred: Deferred = defer.Deferred()
        self.consumer.registerProducer(self, False)
        # while it's not entirely clear why this assignment is necessary, it mirrors
        # the behavior in FileSender.beginFileTransfer and thus is preserved here
        deferred = self.deferred
        return deferred

        # The producer that registered with us, and if it's a push or pull
        # producer.
        self.producer: Optional["interfaces.IProducer"] = None
        self.streaming: Optional[bool] = None

        # Whether the wrapped consumer has asked us to pause.
        self.paused = False

        self.length = content_length

    ### IConsumer APIs ###

    def registerProducer(
        self, producer: "interfaces.IProducer", streaming: bool
    ) -> None:
        """
        Register to receive data from a producer.

        This sets self to be a consumer for a producer. When this object runs
        out of data (as when a send(2) call on a socket succeeds in moving the
        last data from a userspace buffer into a kernelspace buffer), it will
        ask the producer to resumeProducing().

        For L{IPullProducer} providers, C{resumeProducing} will be called once
        each time data is required.

        For L{IPushProducer} providers, C{pauseProducing} will be called
        whenever the write buffer fills up and C{resumeProducing} will only be
        called when it empties. The consumer will only call C{resumeProducing}
        to balance a previous C{pauseProducing} call; the producer is assumed
        to start in an un-paused state.

        @param streaming: C{True} if C{producer} provides L{IPushProducer},
            C{False} if C{producer} provides L{IPullProducer}.

        @raise RuntimeError: If a producer is already registered.
        """
        self.producer = producer
        self.streaming = streaming

        self.wrapped_consumer.registerProducer(self, True)

        # kick off producing if `self.producer` is not a streaming producer
        if not streaming:
            self.resumeProducing()

    def unregisterProducer(self) -> None:
        """
        Stop consuming data from a producer, without disconnecting.
        """
        self.wrapped_consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
        self.wrapped_consumer.unregisterProducer()
        self.paused = True

    def write(self, data: bytes) -> None:
        """
        The producer will write data by calling this method.

        The implementation must be non-blocking and perform whatever
        buffering is necessary. If the producer has provided enough data
        for now and it is a L{IPushProducer}, the consumer may call its
        C{pauseProducing} method.
        """
    def resumeProducing(self) -> None:
        # write the first field, which will always be a json field
        if not self.json_field_written:
            self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
            self.consumer.write(CRLF + b"--" + self.boundary + CRLF)

            content_type = Header(b"Content-Type", b"application/json")
            self.wrapped_consumer.write(bytes(content_type) + CRLF)
            self.consumer.write(bytes(content_type) + CRLF)

            json_field = json.dumps(self.json_field)
            json_bytes = json_field.encode("utf-8")
            self.wrapped_consumer.write(CRLF + json_bytes)
            self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
            self.consumer.write(json_bytes)
            self.consumer.write(CRLF + b"--" + self.boundary + CRLF)

            self.json_field_written = True

        # if we haven't written the content type yet, do so
        if not self.content_type_written:
            type = self.file_content_type.encode("utf-8")
            content_type = Header(b"Content-Type", type)
            self.wrapped_consumer.write(bytes(content_type) + CRLF + CRLF)
            self.content_type_written = True
        chunk: Any = ""
        if self.file:
            # if we haven't written the content type yet, do so
            if not self.content_type_written:
                type = self.file_content_type.encode("utf-8")
                content_type = Header(b"Content-Type", type)
                self.consumer.write(bytes(content_type) + CRLF)
                self.content_type_written = True

        self.wrapped_consumer.write(data)
            chunk = self.file.read(self.CHUNK_SIZE)

    ### IPushProducer APIs ###
        if not chunk:
            # we've reached the end of the file
            self.consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
            self.file = None
            self.consumer.unregisterProducer()

    def stopProducing(self) -> None:
        """
        Stop producing data.
            if self.deferred:
                self.deferred.callback(self.lastSent)
                self.deferred = None
            return

        This tells a producer that its consumer has died, so it must stop
        producing data for good.
        """
        assert self.producer is not None

        self.paused = True
        self.producer.stopProducing()
        self.consumer.write(chunk)
        self.lastSent = chunk[-1:]

    def pauseProducing(self) -> None:
        """
        Pause producing data.
        pass

        Tells a producer that it has produced too much data to process for
        the time being, and to stop until C{resumeProducing()} is called.
        """
        assert self.producer is not None

        self.paused = True

        if self.streaming:
            cast("interfaces.IPushProducer", self.producer).pauseProducing()
        else:
            self.paused = True

    def resumeProducing(self) -> None:
        """
        Resume producing data.

        This tells a producer to re-add itself to the main loop and produce
        more data for its consumer.
        """
        assert self.producer is not None

        if self.streaming:
            cast("interfaces.IPushProducer", self.producer).resumeProducing()
        else:
            # If the producer is not a streaming producer we need to start
            # repeatedly calling `resumeProducing` in a loop.
            run_in_background(self._resumeProducingRepeatedly)

    def content_length(self) -> Optional[int]:
        """
        Calculate the content length of the multipart response
        in bytes.
        """
        if not self.length:
            return None
        # calculate length of json field and content-type header
        json_field = json.dumps(self.json_field)
        json_bytes = json_field.encode("utf-8")
        json_length = len(json_bytes)

        type = self.file_content_type.encode("utf-8")
        content_type = Header(b"Content-Type", type)
        type_length = len(bytes(content_type))

        # 154 is the length of the elements that aren't variable, i.e.
        # CRLFs and boundary strings, etc
        self.length += json_length + type_length + 154

        return self.length

    ### Internal APIs. ###

    async def _resumeProducingRepeatedly(self) -> None:
        assert self.producer is not None
        assert not self.streaming

        producer = cast("interfaces.IPullProducer", self.producer)

        self.paused = False
        while not self.paused:
            producer.resumeProducing()
            await self.clock.sleep(0)
    def stopProducing(self) -> None:
        if self.deferred:
            self.deferred.errback(Exception("Consumer asked us to stop producing"))
            self.deferred = None


class Header:
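A rough worked check of the content_length() arithmetic in the hunk above, using made-up sizes; per the code, the total is the file's length plus the encoded JSON field, plus the file's Content-Type header, plus 154 bytes of fixed framing (CRLFs, boundary strings, etc.).

# Arithmetic sketch of content_length(); all sizes are illustrative.
import json

file_length = 1024                                 # hypothetical media size
json_length = len(json.dumps({}).encode("utf-8"))  # 2 bytes for "{}"
type_length = len(b"Content-Type: image/png")      # 23 bytes, illustrative

print(file_length + json_length + type_length + 154)  # 1203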
@@ -24,14 +24,16 @@ import logging
import os
import shutil
from typing import TYPE_CHECKING, Callable, Optional
from uuid import uuid4

from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.logging.opentracing import start_active_span, trace_with_opname
from synapse.util.async_helpers import maybe_awaitable

from ..storage.databases.main.media_repository import LocalMedia
from ._base import FileInfo, Responder
from .media_storage import FileResponder
from .media_storage import FileResponder, MultipartResponder

logger = logging.getLogger(__name__)

@@ -55,13 +57,21 @@ class StorageProvider(metaclass=abc.ABCMeta):
    """

    @abc.abstractmethod
    async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
    async def fetch(
        self,
        path: str,
        file_info: FileInfo,
        media_info: Optional[LocalMedia] = None,
        federation: bool = False,
    ) -> Optional[Responder]:
        """Attempt to fetch the file described by file_info and stream it
        into writer.

        Args:
            path: Relative path of file in local cache
            file_info: The metadata of the file.
            media_info: metadata of the media item
            federation: Whether the requested media is for a federation request

        Returns:
            Returns a Responder if the provider has the file, otherwise returns None.

@@ -124,7 +134,13 @@ class StorageProviderWrapper(StorageProvider):
        run_in_background(store)

    @trace_with_opname("StorageProviderWrapper.fetch")
    async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
    async def fetch(
        self,
        path: str,
        file_info: FileInfo,
        media_info: Optional[LocalMedia] = None,
        federation: bool = False,
    ) -> Optional[Responder]:
        if file_info.url_cache:
            # Files in the URL preview cache definitely aren't stored here,
            # so avoid any potentially slow I/O or network access.

@@ -132,7 +148,9 @@ class StorageProviderWrapper(StorageProvider):

        # store_file is supposed to return an Awaitable, but guard
        # against improper implementations.
        return await maybe_awaitable(self.backend.fetch(path, file_info))
        return await maybe_awaitable(
            self.backend.fetch(path, file_info, media_info, federation)
        )


class FileStorageProviderBackend(StorageProvider):

@@ -172,11 +190,23 @@ class FileStorageProviderBackend(StorageProvider):
        )

    @trace_with_opname("FileStorageProviderBackend.fetch")
    async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
    async def fetch(
        self,
        path: str,
        file_info: FileInfo,
        media_info: Optional[LocalMedia] = None,
        federation: bool = False,
    ) -> Optional[Responder]:
        """See StorageProvider.fetch"""

        backup_fname = os.path.join(self.base_directory, path)
        if os.path.isfile(backup_fname):
            if federation:
                assert media_info is not None
                boundary = uuid4().hex.encode("ascii")
                return MultipartResponder(
                    open(backup_fname, "rb"), media_info, boundary
                )
            return FileResponder(open(backup_fname, "rb"))

        return None
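For comparison, a hedged sketch of a third-party backend written against the widened fetch() signature above. Only the signature and the Responder types come from this diff; the module paths in the imports and the backend class itself are assumptions.

# Sketch of a custom backend implementing the widened fetch() signature.
# Module paths are assumed; MyBackend and base_directory are hypothetical.
import os
from typing import Optional
from uuid import uuid4

from synapse.media._base import FileInfo, Responder
from synapse.media.media_storage import FileResponder, MultipartResponder
from synapse.media.storage_provider import StorageProvider
from synapse.storage.databases.main.media_repository import LocalMedia


class MyBackend(StorageProvider):
    def __init__(self, base_directory: str):
        self.base_directory = base_directory

    async def store_file(self, path: str, file_info: FileInfo) -> None:
        pass  # out of scope for this sketch

    async def fetch(
        self,
        path: str,
        file_info: FileInfo,
        media_info: Optional[LocalMedia] = None,
        federation: bool = False,
    ) -> Optional[Responder]:
        fname = os.path.join(self.base_directory, path)
        if not os.path.isfile(fname):
            return None
        if federation:
            # Federation /download responses use the multipart framing.
            assert media_info is not None
            boundary = uuid4().hex.encode("ascii")
            return MultipartResponder(open(fname, "rb"), media_info, boundary)
        return FileResponder(open(fname, "rb"))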
@@ -721,6 +721,7 @@ class Notifier:
                    user.to_string(),
                    new_events,
                    is_peeking=is_peeking,
                    msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
                )
            elif keyname == StreamKeyType.PRESENCE:
                now = self.clock.time_msec()

@@ -28,7 +28,7 @@ import jinja2
from markupsafe import Markup
from prometheus_client import Counter

from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
from synapse.api.constants import EventTypes, Membership, RoomTypes
from synapse.api.errors import StoreError
from synapse.config.emailconfig import EmailSubjectConfig
from synapse.events import EventBase

@@ -532,6 +532,7 @@ class Mailer:
                self._storage_controllers,
                user_id,
                results.events_before,
                msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
            )
            the_events.append(notif_event)

@@ -716,8 +717,7 @@ class Mailer:
            )
            if (
                create_event
                and create_event.content.get(EventContentFields.ROOM_TYPE)
                == RoomTypes.SPACE
                and create_event.content.get("room_type") == RoomTypes.SPACE
            ):
                return self.email_subjects.invite_from_person_to_space % {
                    "person": inviter_name,

@@ -114,19 +114,13 @@ class ReplicationDataHandler:
        """
        all_room_ids: Set[str] = set()
        if stream_name == DeviceListsStream.NAME:
            if any(not row.is_signature and not row.hosts_calculated for row in rows):
            if any(row.entity.startswith("@") and not row.is_signature for row in rows):
                prev_token = self.store.get_device_stream_token()
                all_room_ids = await self.store.get_all_device_list_changes(
                    prev_token, token
                )
                self.store.device_lists_in_rooms_have_changed(all_room_ids, token)

            # If we're sending federation we need to update the device lists
            # outbound pokes stream change cache with updated hosts.
            if self.send_handler and any(row.hosts_calculated for row in rows):
                hosts = await self.store.get_destinations_for_device(token)
                self.store.device_lists_outbound_pokes_have_changed(hosts, token)

        self.store.process_replication_rows(stream_name, instance_name, token, rows)
        # NOTE: this must be called after process_replication_rows to ensure any
        # cache invalidations are first handled before any stream ID advances.

@@ -439,11 +433,12 @@ class FederationSenderHandler:
            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.
            if any(row.hosts_calculated for row in rows):
                hosts = await self.store.get_destinations_for_device(token)
                await self.federation_sender.send_device_messages(
                    hosts, immediate=False
                )
            hosts = {
                row.entity
                for row in rows
                if not row.entity.startswith("@") and not row.is_signature
            }
            await self.federation_sender.send_device_messages(hosts, immediate=False)

        elif stream_name == ToDeviceStream.NAME:
            # The to_device stream includes stuff to be pushed to both local

@@ -549,14 +549,10 @@ class DeviceListsStream(_StreamFromIdGen):

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class DeviceListsStreamRow:
        user_id: str
        entity: str
        # Indicates that a user has signed their own device with their user-signing key
        is_signature: bool

        # Indicates if this is a notification that we've calculated the hosts we
        # need to send the update to.
        hosts_calculated: bool

    NAME = "device_lists"
    ROW_TYPE = DeviceListsStreamRow

@@ -598,13 +594,13 @@ class DeviceListsStream(_StreamFromIdGen):
        upper_limit_token = min(upper_limit_token, signatures_to_token)

        device_updates = [
            (stream_id, (entity, False, hosts))
            for stream_id, (entity, hosts) in device_updates
            (stream_id, (entity, False))
            for stream_id, (entity,) in device_updates
            if stream_id <= upper_limit_token
        ]

        signatures_updates = [
            (stream_id, (entity, True, False))
            (stream_id, (entity, True))
            for stream_id, (entity,) in signatures_updates
            if stream_id <= upper_limit_token
        ]
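To make the row-shape change above concrete, a tiny self-contained sketch of how a consumer distinguishes the three kinds of update under the new explicit flags (the old code inferred this from whether the entity string started with '@'); the function and its labels are illustrative, not Synapse code.

# Branching on the new DeviceListsStreamRow flags; labels are illustrative.
def classify(is_signature: bool, hosts_calculated: bool) -> str:
    if is_signature:
        return "user signed their own device (user-signing key)"
    if hosts_calculated:
        return "destination hosts for this update have been calculated"
    return "a user's device list changed"

print(classify(False, True))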
@@ -101,7 +101,6 @@ from synapse.rest.admin.users import (
    ResetPasswordRestServlet,
    SearchUsersRestServlet,
    ShadowBanRestServlet,
    SuspendAccountRestServlet,
    UserAdminServlet,
    UserByExternalId,
    UserByThreePid,

@@ -328,8 +327,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    BackgroundUpdateRestServlet(hs).register(http_server)
    BackgroundUpdateStartJobRestServlet(hs).register(http_server)
    ExperimentalFeaturesRestServlet(hs).register(http_server)
    if hs.config.experimental.msc3823_account_suspension:
        SuspendAccountRestServlet(hs).register(http_server)


def register_servlets_for_client_rest_resource(

@@ -41,6 +41,7 @@ class ExperimentalFeature(str, Enum):

    MSC3026 = "msc3026"
    MSC3881 = "msc3881"
    MSC3967 = "msc3967"


class ExperimentalFeaturesRestServlet(RestServlet):

@@ -61,8 +61,8 @@ class ListDestinationsRestServlet(RestServlet):
    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        await assert_requester_is_admin(self._auth, request)

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)
        start = parse_integer(request, "from", default=0, negative=False)
        limit = parse_integer(request, "limit", default=100, negative=False)

        destination = parse_string(request, "destination")

@@ -181,8 +181,8 @@ class DestinationMembershipRestServlet(RestServlet):
        if not await self._store.is_destination_known(destination):
            raise NotFoundError("Unknown destination")

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)
        start = parse_integer(request, "from", default=0, negative=False)
        limit = parse_integer(request, "limit", default=100, negative=False)

        direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)

@@ -311,8 +311,8 @@ class DeleteMediaByDateSize(RestServlet):
    ) -> Tuple[int, JsonDict]:
        await assert_requester_is_admin(self.auth, request)

        before_ts = parse_integer(request, "before_ts", required=True)
        size_gt = parse_integer(request, "size_gt", default=0)
        before_ts = parse_integer(request, "before_ts", required=True, negative=False)
        size_gt = parse_integer(request, "size_gt", default=0, negative=False)
        keep_profiles = parse_boolean(request, "keep_profiles", default=True)

        if before_ts < 30000000000:  # Dec 1970 in milliseconds, Aug 2920 in seconds

@@ -377,8 +377,8 @@ class UserMediaRestServlet(RestServlet):
        if user is None:
            raise NotFoundError("Unknown user")

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)
        start = parse_integer(request, "from", default=0, negative=False)
        limit = parse_integer(request, "limit", default=100, negative=False)

        # If neither `order_by` nor `dir` is set, set the default order
        # to newest media is on top for backward compatibility.

@@ -421,8 +421,8 @@ class UserMediaRestServlet(RestServlet):
        if user is None:
            raise NotFoundError("Unknown user")

        start = parse_integer(request, "from", default=0)
        limit = parse_integer(request, "limit", default=100)
        start = parse_integer(request, "from", default=0, negative=False)
        limit = parse_integer(request, "limit", default=100, negative=False)

        # If neither `order_by` nor `dir` is set, set the default order
        # to newest media is on top for backward compatibility.
|
||||
ResolveRoomIdMixin,
|
||||
RestServlet,
|
||||
assert_params_in_dict,
|
||||
parse_boolean,
|
||||
parse_enum,
|
||||
parse_integer,
|
||||
parse_json,
|
||||
@@ -243,23 +242,13 @@ class ListRoomRestServlet(RestServlet):
|
||||
errcode=Codes.INVALID_PARAM,
|
||||
)
|
||||
|
||||
public_rooms = parse_boolean(request, "public_rooms")
|
||||
empty_rooms = parse_boolean(request, "empty_rooms")
|
||||
|
||||
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
|
||||
reverse_order = True if direction == Direction.BACKWARDS else False
|
||||
|
||||
# Return list of rooms according to parameters
|
||||
rooms, total_rooms = await self.store.get_rooms_paginate(
|
||||
start,
|
||||
limit,
|
||||
order_by,
|
||||
reverse_order,
|
||||
search_term,
|
||||
public_rooms,
|
||||
empty_rooms,
|
||||
start, limit, order_by, reverse_order, search_term
|
||||
)
|
||||
|
||||
response = {
|
||||
# next_token should be opaque, so return a value the client can parse
|
||||
"offset": start,
|
||||
|
||||
@@ -63,10 +63,10 @@ class UserMediaStatisticsRestServlet(RestServlet):
|
||||
),
|
||||
)
|
||||
|
||||
start = parse_integer(request, "from", default=0)
|
||||
limit = parse_integer(request, "limit", default=100)
|
||||
from_ts = parse_integer(request, "from_ts", default=0)
|
||||
until_ts = parse_integer(request, "until_ts")
|
||||
start = parse_integer(request, "from", default=0, negative=False)
|
||||
limit = parse_integer(request, "limit", default=100, negative=False)
|
||||
from_ts = parse_integer(request, "from_ts", default=0, negative=False)
|
||||
until_ts = parse_integer(request, "until_ts", negative=False)
|
||||
|
||||
if until_ts is not None:
|
||||
if until_ts <= from_ts:
|
||||
|
||||
@@ -27,13 +27,11 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import attr
|
||||
|
||||
from synapse._pydantic_compat import HAS_PYDANTIC_V2
|
||||
from synapse.api.constants import Direction, UserTypes
|
||||
from synapse.api.errors import Codes, NotFoundError, SynapseError
|
||||
from synapse.http.servlet import (
|
||||
RestServlet,
|
||||
assert_params_in_dict,
|
||||
parse_and_validate_json_object_from_request,
|
||||
parse_boolean,
|
||||
parse_enum,
|
||||
parse_integer,
|
||||
@@ -51,17 +49,10 @@ from synapse.rest.client._base import client_patterns
|
||||
from synapse.storage.databases.main.registration import ExternalIDReuseException
|
||||
from synapse.storage.databases.main.stats import UserSortOrder
|
||||
from synapse.types import JsonDict, JsonMapping, UserID
|
||||
from synapse.types.rest import RequestBodyModel
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
if TYPE_CHECKING or HAS_PYDANTIC_V2:
|
||||
from pydantic.v1 import StrictBool
|
||||
else:
|
||||
from pydantic import StrictBool
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -99,8 +90,8 @@ class UsersRestServletV2(RestServlet):
|
||||
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
await assert_requester_is_admin(self.auth, request)
|
||||
|
||||
start = parse_integer(request, "from", default=0)
|
||||
limit = parse_integer(request, "limit", default=100)
|
||||
start = parse_integer(request, "from", default=0, negative=False)
|
||||
limit = parse_integer(request, "limit", default=100, negative=False)
|
||||
|
||||
user_id = parse_string(request, "user_id")
|
||||
name = parse_string(request, "name", encoding="utf-8")
|
||||
@@ -741,36 +732,6 @@ class DeactivateAccountRestServlet(RestServlet):
|
||||
return HTTPStatus.OK, {"id_server_unbind_result": id_server_unbind_result}
|
||||
|
||||
|
||||
class SuspendAccountRestServlet(RestServlet):
|
||||
PATTERNS = admin_patterns("/suspend/(?P<target_user_id>[^/]*)$")
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.auth = hs.get_auth()
|
||||
self.is_mine = hs.is_mine
|
||||
self.store = hs.get_datastores().main
|
||||
|
||||
class PutBody(RequestBodyModel):
|
||||
suspend: StrictBool
|
||||
|
||||
async def on_PUT(
|
||||
self, request: SynapseRequest, target_user_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request)
|
||||
await assert_user_is_admin(self.auth, requester)
|
||||
|
||||
if not self.is_mine(UserID.from_string(target_user_id)):
|
||||
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only suspend local users")
|
||||
|
||||
if not await self.store.get_user_by_id(target_user_id):
|
||||
raise NotFoundError("User not found")
|
||||
|
||||
body = parse_and_validate_json_object_from_request(request, self.PutBody)
|
||||
suspend = body.suspend
|
||||
await self.store.set_user_suspended_status(target_user_id, suspend)
|
||||
|
||||
return HTTPStatus.OK, {f"user_{target_user_id}_suspended": suspend}
|
||||
|
||||
|
||||
class AccountValidityRenewServlet(RestServlet):
|
||||
PATTERNS = admin_patterns("/account_validity/validity$")
|
||||
|
||||
|
||||
@@ -382,35 +382,44 @@ class SigningKeyUploadServlet(RestServlet):
|
||||
master_key_updatable_without_uia,
|
||||
) = await self.e2e_keys_handler.check_cross_signing_setup(user_id)
|
||||
|
||||
# Resending exactly the same keys should just 200 OK without doing a UIA prompt.
|
||||
keys_are_different = await self.e2e_keys_handler.has_different_keys(
|
||||
user_id, body
|
||||
)
|
||||
if not keys_are_different:
|
||||
return 200, {}
|
||||
# Before MSC3967 we required UIA both when setting up cross signing for the
|
||||
# first time and when resetting the device signing key. With MSC3967 we only
|
||||
# require UIA when resetting cross-signing, and not when setting up the first
|
||||
# time. Because there is no UIA in MSC3861, for now we throw an error if the
|
||||
# user tries to reset the device signing key when MSC3861 is enabled, but allow
|
||||
# first-time setup.
|
||||
if self.hs.config.experimental.msc3861.enabled:
|
||||
# The auth service has to explicitly mark the master key as replaceable
|
||||
# without UIA to reset the device signing key with MSC3861.
|
||||
if is_cross_signing_setup and not master_key_updatable_without_uia:
|
||||
config = self.hs.config.experimental.msc3861
|
||||
if config.account_management_url is not None:
|
||||
url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
|
||||
else:
|
||||
url = config.issuer
|
||||
|
||||
# The keys are different; is x-signing set up? If no, then this is first-time
|
||||
# setup, and that is allowed without UIA, per MSC3967.
|
||||
# If yes, then we need to authenticate the change.
|
||||
if is_cross_signing_setup:
|
||||
# With MSC3861, UIA is not possible. Instead, the auth service has to
|
||||
# explicitly mark the master key as replaceable.
|
||||
if self.hs.config.experimental.msc3861.enabled:
|
||||
if not master_key_updatable_without_uia:
|
||||
config = self.hs.config.experimental.msc3861
|
||||
if config.account_management_url is not None:
|
||||
url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
|
||||
else:
|
||||
url = config.issuer
|
||||
raise SynapseError(
|
||||
HTTPStatus.NOT_IMPLEMENTED,
|
||||
"To reset your end-to-end encryption cross-signing identity, "
|
||||
f"you first need to approve it at {url} and then try again.",
|
||||
Codes.UNRECOGNIZED,
|
||||
)
|
||||
# But first-time setup is fine
|
||||
|
||||
raise SynapseError(
|
||||
HTTPStatus.NOT_IMPLEMENTED,
|
||||
"To reset your end-to-end encryption cross-signing identity, "
|
||||
f"you first need to approve it at {url} and then try again.",
|
||||
Codes.UNRECOGNIZED,
|
||||
)
|
||||
else:
|
||||
# Without MSC3861, we require UIA.
|
||||
elif self.hs.config.experimental.msc3967_enabled:
|
||||
# MSC3967 allows this endpoint to 200 OK for idempotency. Resending exactly the same
|
||||
# keys should just 200 OK without doing a UIA prompt.
|
||||
keys_are_different = await self.e2e_keys_handler.has_different_keys(
|
||||
user_id, body
|
||||
)
|
||||
if not keys_are_different:
|
||||
# FIXME: we do not fallthrough to upload_signing_keys_for_user because confusingly
|
||||
# if we do, we 500 as it looks like it tries to INSERT the same key twice, causing a
|
||||
# unique key constraint violation. This sounds like a bug?
|
||||
return 200, {}
|
||||
# the keys are different, is x-signing set up? If no, then the keys don't exist which is
|
||||
# why they are different. If yes, then we need to UIA to change them.
|
||||
if is_cross_signing_setup:
|
||||
await self.auth_handler.validate_user_via_ui_auth(
|
||||
requester,
|
||||
request,
|
||||
@@ -419,6 +428,18 @@ class SigningKeyUploadServlet(RestServlet):
|
||||
# Do not allow skipping of UIA auth.
|
||||
can_skip_ui_auth=False,
|
||||
)
|
||||
# Otherwise we don't require UIA since we are setting up cross signing for first time
|
||||
else:
|
||||
# Previous behaviour is to always require UIA but allow it to be skipped
|
||||
await self.auth_handler.validate_user_via_ui_auth(
|
||||
requester,
|
||||
request,
|
||||
body,
|
||||
"add a device signing key to your account",
|
||||
# Allow skipping of UI auth since this is frequently called directly
|
||||
# after login and it is silly to ask users to re-auth immediately.
|
||||
can_skip_ui_auth=True,
|
||||
)
|
||||
|
||||
result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
|
||||
return 200, result
|
||||
|
||||
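A condensed, self-contained paraphrase of the UIA decision flow in the signing-key hunks above; the flag names mirror the diff, but the function and its return labels are illustrative, not the verbatim servlet.

# Paraphrased decision flow for uploading cross-signing keys (MSC3967 /
# MSC3861); inputs mirror the diff's flags, outputs are labels only.
def decide(
    keys_are_different: bool,
    is_cross_signing_setup: bool,
    msc3861_enabled: bool,
    master_key_updatable_without_uia: bool,
) -> str:
    if not keys_are_different:
        return "200 OK: idempotent resend, no UIA"
    if not is_cross_signing_setup:
        return "allow: first-time setup needs no UIA (MSC3967)"
    if msc3861_enabled:
        if master_key_updatable_without_uia:
            return "allow: auth service marked the master key replaceable"
        return "501: approve the reset at the account management URL first"
    return "require UIA before replacing the device signing key"

print(decide(True, True, False, False))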
@@ -53,7 +53,6 @@ class KnockRoomAliasServlet(RestServlet):
        super().__init__()
        self.room_member_handler = hs.get_room_member_handler()
        self.auth = hs.get_auth()
        self._support_via = hs.config.experimental.msc4156_enabled

    async def on_POST(
        self,

@@ -75,13 +74,6 @@ class KnockRoomAliasServlet(RestServlet):
            remote_room_hosts = parse_strings_from_args(
                args, "server_name", required=False
            )
            if self._support_via:
                remote_room_hosts = parse_strings_from_args(
                    args,
                    "org.matrix.msc4156.via",
                    default=remote_room_hosts,
                    required=False,
                )
        elif RoomAlias.is_valid(room_identifier):
            handler = self.room_member_handler
            room_alias = RoomAlias.from_string(room_identifier)

@@ -32,7 +32,6 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict

from ...api.errors import SynapseError
from ._base import client_patterns

if TYPE_CHECKING:

@@ -57,22 +56,7 @@ class NotificationsServlet(RestServlet):
        requester = await self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()

        # While this is intended to be "string" to clients, the 'from' token
        # is actually based on a numeric ID. So it must parse to an int.
        from_token_str = parse_string(request, "from", required=False)
        if from_token_str is not None:
            # Parse to an integer.
            try:
                from_token = int(from_token_str)
            except ValueError:
                # If it doesn't parse to an integer, then this cannot possibly be a valid
                # pagination token, as we only hand out integers.
                raise SynapseError(
                    400, 'Query parameter "from" contains unrecognised token'
                )
        else:
            from_token = None

        from_token = parse_string(request, "from", required=False)
        limit = parse_integer(request, "limit", default=50)
        only = parse_string(request, "only", required=False)
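The stricter 'from' handling above, extracted into a standalone helper for clarity: the token is string-typed for clients but backed by a numeric ID, so it must parse as an int. The helper name is illustrative, and it raises ValueError where the servlet raises SynapseError(400, ...).

# Standalone version of the int validation shown above.
from typing import Optional

def parse_from_token(from_token_str: Optional[str]) -> Optional[int]:
    if from_token_str is None:
        return None
    try:
        return int(from_token_str)
    except ValueError:
        raise ValueError('Query parameter "from" contains unrecognised token')

assert parse_from_token("42") == 42
assert parse_from_token(None) is None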
@@ -108,19 +108,6 @@ class ProfileDisplaynameRestServlet(RestServlet):

        propagate = _read_propagate(self.hs, request)

        requester_suspended = (
            await self.hs.get_datastores().main.get_user_suspended_status(
                requester.user.to_string()
            )
        )

        if requester_suspended:
            raise SynapseError(
                403,
                "Updating displayname while account is suspended is not allowed.",
                Codes.USER_ACCOUNT_SUSPENDED,
            )

        await self.profile_handler.set_displayname(
            user, requester, new_name, is_admin, propagate=propagate
        )

@@ -180,19 +167,6 @@ class ProfileAvatarURLRestServlet(RestServlet):

        propagate = _read_propagate(self.hs, request)

        requester_suspended = (
            await self.hs.get_datastores().main.get_user_suspended_status(
                requester.user.to_string()
            )
        )

        if requester_suspended:
            raise SynapseError(
                403,
                "Updating avatar URL while account is suspended is not allowed.",
                Codes.USER_ACCOUNT_SUSPENDED,
            )

        await self.profile_handler.set_avatar_url(
            user, requester, new_avatar_url, is_admin, propagate=propagate
        )

@@ -107,15 +107,7 @@ class ReportEventRestServlet(RestServlet):


class ReportRoomRestServlet(RestServlet):
    """This endpoint lets clients report a room for abuse.

    Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org
    for content moderation purposes, and therefore backwards compatibility should be
    carefully considered when changing anything on this endpoint.

    More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151
    """

    # https://github.com/matrix-org/matrix-spec-proposals/pull/4151
    PATTERNS = client_patterns(
        "/org.matrix.msc4151/rooms/(?P<room_id>[^/]*)/report$",
        releases=[],

@@ -417,7 +417,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
        super().__init__(hs)
        super(ResolveRoomIdMixin, self).__init__(hs)  # ensure the Mixin is set up
        self.auth = hs.get_auth()
        self._support_via = hs.config.experimental.msc4156_enabled

    def register(self, http_server: HttpServer) -> None:
        # /join/$room_identifier[/$txn_id]

@@ -436,13 +435,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
        # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
        args: Dict[bytes, List[bytes]] = request.args  # type: ignore
        remote_room_hosts = parse_strings_from_args(args, "server_name", required=False)
        if self._support_via:
            remote_room_hosts = parse_strings_from_args(
                args,
                "org.matrix.msc4156.via",
                default=remote_room_hosts,
                required=False,
            )
        room_id, remote_room_hosts = await self.resolve_room_id(
            room_identifier,
            remote_room_hosts,

@@ -510,7 +502,7 @@ class PublicRoomListRestServlet(RestServlet):
            if server:
                raise e

        limit: Optional[int] = parse_integer(request, "limit", 0)
        limit: Optional[int] = parse_integer(request, "limit", 0, negative=False)
        since_token = parse_string(request, "since")

        if limit == 0:

@@ -1120,20 +1112,6 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)

        requester_suspended = await self._store.get_user_suspended_status(
            requester.user.to_string()
        )

        if requester_suspended:
            event = await self._store.get_event(event_id, allow_none=True)
            if event:
                if event.sender != requester.user.to_string():
                    raise SynapseError(
                        403,
                        "You can only redact your own events while account is suspended.",
                        Codes.USER_ACCOUNT_SUSPENDED,
                    )

        # Ensure the redacts property in the content matches the one provided in
        # the URL.
        room_version = await self._store.get_room_version(room_id)

@@ -1444,7 +1422,16 @@ class RoomHierarchyRestServlet(RestServlet):
        requester = await self._auth.get_user_by_req(request, allow_guest=True)

        max_depth = parse_integer(request, "max_depth")
        if max_depth is not None and max_depth < 0:
            raise SynapseError(
                400, "'max_depth' must be a non-negative integer", Codes.BAD_JSON
            )

        limit = parse_integer(request, "limit")
        if limit is not None and limit <= 0:
            raise SynapseError(
                400, "'limit' must be a positive integer", Codes.BAD_JSON
            )

        return 200, await self._room_summary_handler.get_room_hierarchy(
            requester,

@@ -864,7 +864,7 @@ class SlidingSyncRestServlet(RestServlet):
    """

    PATTERNS = client_patterns(
        "/org.matrix.simplified_msc3575/sync$", releases=[], v1=False, unstable=True
        "/org.matrix.msc3575/sync$", releases=[], v1=False, unstable=True
    )

    def __init__(self, hs: "HomeServer"):

@@ -617,17 +617,6 @@ class EventsPersistenceStorageController:
                    room_id, chunk
                )

                with Measure(self._clock, "calculate_chain_cover_index_for_events"):
                    # We now calculate chain ID/sequence numbers for any state events we're
                    # persisting. We ignore out of band memberships as we're not in the room
                    # and won't have their auth chain (we'll fix it up later if we join the
                    # room).
                    #
                    # See: docs/auth_chain_difference_algorithm.md
                    new_event_links = await self.persist_events_store.calculate_chain_cover_index_for_events(
                        room_id, [e for e, _ in chunk]
                    )

                await self.persist_events_store._persist_events_and_state_updates(
                    room_id,
                    chunk,

@@ -635,7 +624,6 @@ class EventsPersistenceStorageController:
                    new_forward_extremities=new_forward_extremities,
                    use_negative_stream_ordering=backfilled,
                    inhibit_local_membership_updates=backfilled,
                    new_event_links=new_event_links,
                )

                return replaced_events

@@ -45,7 +45,7 @@ from synapse.storage.util.partial_state_events_tracker import (
    PartialStateEventsTracker,
)
from synapse.synapse_rust.acl import ServerAclEvaluator
from synapse.types import MutableStateMap, StateMap, StreamToken, get_domain_from_id
from synapse.types import MutableStateMap, StateMap, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import intern_string

@@ -372,91 +372,6 @@ class StateStorageController:
        )
        return state_map[event_id]

    async def get_state_after_event(
        self,
        event_id: str,
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[str]:
        """
        Get the room state after the given event

        Args:
            event_id: event of interest
            state_filter: The state filter used to fetch state from the database.
            await_full_state: if `True`, will block if we do not yet have complete state
                at the event and `state_filter` is not satisfied by partial state.
                Defaults to `True`.
        """
        state_ids = await self.get_state_ids_for_event(
            event_id,
            state_filter=state_filter or StateFilter.all(),
            await_full_state=await_full_state,
        )

        # using get_metadata_for_events here (instead of get_event) sidesteps an issue
        # with redactions: if `event_id` is a redaction event, and we don't have the
        # original (possibly because it got purged), get_event will refuse to return
        # the redaction event, which isn't terribly helpful here.
        #
        # (To be fair, in that case we could assume it's *not* a state event, and
        # therefore we don't need to worry about it. But still, it seems cleaner just
        # to pull the metadata.)
        m = (await self.stores.main.get_metadata_for_events([event_id]))[event_id]
        if m.state_key is not None and m.rejection_reason is None:
            state_ids = dict(state_ids)
            state_ids[(m.event_type, m.state_key)] = event_id

        return state_ids

    async def get_state_at(
        self,
        room_id: str,
        stream_position: StreamToken,
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[str]:
        """Get the room state at a particular stream position

        Args:
            room_id: room for which to get state
            stream_position: point at which to get state
            state_filter: The state filter used to fetch state from the database.
            await_full_state: if `True`, will block if we do not yet have complete state
                at the last event in the room before `stream_position` and
                `state_filter` is not satisfied by partial state. Defaults to `True`.
        """
        # FIXME: This gets the state at the latest event before the stream ordering,
        # which might not be the same as the "current state" of the room at the time
        # of the stream token if there were multiple forward extremities at the time.
        last_event_id = (
            await self.stores.main.get_last_event_id_in_room_before_stream_ordering(
                room_id,
                end_token=stream_position.room_key,
            )
        )

        if last_event_id:
            state = await self.get_state_after_event(
                last_event_id,
                state_filter=state_filter or StateFilter.all(),
                await_full_state=await_full_state,
            )

        else:
            # no events in this room - so presumably no state
            state = {}

            # (erikj) This should be rarely hit, but we've had some reports that
            # we get more state down gappy syncs than we should, so let's add
            # some logging.
            logger.info(
                "Failed to find any events in room %s at %s",
                room_id,
                stream_position.room_key,
            )
        return state

    @trace
    @tag_args
    async def get_state_for_groups(
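For context, a hypothetical call to the get_state_at helper shown above: fetch a room's membership state at a sync token while tolerating partial state. StateFilter.from_types is Synapse's filter constructor; state_controller, room_id, and since_token are assumed to exist in the caller.

# Hypothetical usage of get_state_at; surrounding names are assumed.
from synapse.types.state import StateFilter

async def membership_at_token(state_controller, room_id, since_token):
    return await state_controller.get_state_at(
        room_id,
        stream_position=since_token,
        state_filter=StateFilter.from_types([("m.room.member", None)]),
        await_full_state=False,
    )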
@@ -825,13 +825,14 @@ class DeviceInboxWorkerStore(SQLBaseStore):
            # Check if we've already inserted a matching message_id for that
            # origin. This can happen if the origin doesn't receive our
            # acknowledgement from the first time we received the message.
            already_inserted = self.db_pool.simple_select_list_txn(
            already_inserted = self.db_pool.simple_select_one_txn(
                txn,
                table="device_federation_inbox",
                keyvalues={"origin": origin, "message_id": message_id},
                retcols=("message_id",),
                allow_none=True,
            )
            if already_inserted:
            if already_inserted is not None:
                return

            # Add an entry for this message_id so that we know we've processed
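The simple_select_one_txn call above boils down to a single-row probe on (origin, message_id); a hedged sketch of the equivalent raw query follows, with the helper name and cursor-style txn being illustrative.

# Illustrative equivalent of the dedup check above: probe for one
# (origin, message_id) row and skip the insert if it already exists.
def already_processed(txn, origin: str, message_id: str) -> bool:
    txn.execute(
        "SELECT message_id FROM device_federation_inbox"
        " WHERE origin = ? AND message_id = ? LIMIT 1",
        (origin, message_id),
    )
    return txn.fetchone() is not None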
@@ -164,24 +164,22 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
            prefilled_cache=user_signature_stream_prefill,
        )

        self._device_list_federation_stream_cache = None
        if hs.should_send_federation():
            (
                device_list_federation_prefill,
                device_list_federation_list_id,
            ) = self.db_pool.get_cache_dict(
                db_conn,
                "device_lists_outbound_pokes",
                entity_column="destination",
                stream_column="stream_id",
                max_value=device_list_max,
                limit=10000,
            )
            self._device_list_federation_stream_cache = StreamChangeCache(
                "DeviceListFederationStreamChangeCache",
                device_list_federation_list_id,
                prefilled_cache=device_list_federation_prefill,
            )
        (
            device_list_federation_prefill,
            device_list_federation_list_id,
        ) = self.db_pool.get_cache_dict(
            db_conn,
            "device_lists_outbound_pokes",
            entity_column="destination",
            stream_column="stream_id",
            max_value=device_list_max,
            limit=10000,
        )
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache",
            device_list_federation_list_id,
            prefilled_cache=device_list_federation_prefill,
        )

        if hs.config.worker.run_background_tasks:
            self._clock.looping_call(

@@ -209,30 +207,23 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
    ) -> None:
        for row in rows:
            if row.is_signature:
                self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
                self._user_signature_stream_cache.entity_has_changed(row.entity, token)
                continue

            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.
            if not row.hosts_calculated:
                self._device_list_stream_cache.entity_has_changed(row.user_id, token)
                self.get_cached_devices_for_user.invalidate((row.user_id,))
                self._get_cached_user_device.invalidate((row.user_id,))
                self.get_device_list_last_stream_id_for_remote.invalidate(
                    (row.user_id,)
            if row.entity.startswith("@"):
                self._device_list_stream_cache.entity_has_changed(row.entity, token)
                self.get_cached_devices_for_user.invalidate((row.entity,))
                self._get_cached_user_device.invalidate((row.entity,))
                self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))

            else:
                self._device_list_federation_stream_cache.entity_has_changed(
                    row.entity, token
                )

    def device_lists_outbound_pokes_have_changed(
        self, destinations: StrCollection, token: int
    ) -> None:
        assert self._device_list_federation_stream_cache is not None

        for destination in destinations:
            self._device_list_federation_stream_cache.entity_has_changed(
                destination, token
            )

    def device_lists_in_rooms_have_changed(
        self, room_ids: StrCollection, token: int
    ) -> None:

@@ -372,11 +363,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
            EDU contents.
        """
        now_stream_id = self.get_device_stream_token()
        if from_stream_id == now_stream_id:
            return now_stream_id, []

        if self._device_list_federation_stream_cache is None:
            raise Exception("Func can only be used on federation senders")

        has_changed = self._device_list_federation_stream_cache.has_entity_changed(
            destination, int(from_stream_id)

@@ -1032,10 +1018,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
        # This query Does The Right Thing where it'll correctly apply the
        # bounds to the inner queries.
        sql = """
            SELECT stream_id, user_id, hosts FROM (
                SELECT stream_id, user_id, false AS hosts FROM device_lists_stream
            SELECT stream_id, entity FROM (
                SELECT stream_id, user_id AS entity FROM device_lists_stream
                UNION ALL
                SELECT DISTINCT stream_id, user_id, true AS hosts FROM device_lists_outbound_pokes
                SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
            ) AS e
            WHERE ? < stream_id AND stream_id <= ?
            ORDER BY stream_id ASC

@@ -1591,14 +1577,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
            get_device_list_changes_in_room_txn,
        )

    async def get_destinations_for_device(self, stream_id: int) -> StrCollection:
        return await self.db_pool.simple_select_onecol(
            table="device_lists_outbound_pokes",
            keyvalues={"stream_id": stream_id},
            retcol="destination",
            desc="get_destinations_for_device",
        )


class DeviceBackgroundUpdateStore(SQLBaseStore):
    def __init__(

@@ -2131,18 +2109,18 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
        user_id: str,
        device_id: str,
        hosts: Collection[str],
        stream_id: int,
        stream_ids: List[int],
        context: Optional[Dict[str, str]],
    ) -> None:
        if self._device_list_federation_stream_cache:
            for host in hosts:
                txn.call_after(
                    self._device_list_federation_stream_cache.entity_has_changed,
                    host,
                    stream_id,
                )
        for host in hosts:
            txn.call_after(
                self._device_list_federation_stream_cache.entity_has_changed,
                host,
                stream_ids[-1],
            )

        now = self._clock.time_msec()
        stream_id_iterator = iter(stream_ids)

        encoded_context = json_encoder.encode(context)
        mark_sent = not self.hs.is_mine_id(user_id)

@@ -2151,7 +2129,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                (
                    destination,
                    self._instance_name,
                    stream_id,
                    next(stream_id_iterator),
                    user_id,
                    device_id,
                    mark_sent,

@@ -2336,22 +2314,22 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
            return

        def add_device_list_outbound_pokes_txn(
            txn: LoggingTransaction, stream_id: int
            txn: LoggingTransaction, stream_ids: List[int]
        ) -> None:
            self._add_device_outbound_poke_to_stream_txn(
                txn,
                user_id=user_id,
                device_id=device_id,
                hosts=hosts,
                stream_id=stream_id,
                stream_ids=stream_ids,
                context=context,
            )

        async with self._device_list_id_gen.get_next() as stream_id:
        async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
            return await self.db_pool.runInteraction(
                "add_device_list_outbound_pokes",
                add_device_list_outbound_pokes_txn,
                stream_id,
                stream_ids,
            )
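The switch from get_next() to get_next_mult(len(hosts)) above allocates one stream id per destination host instead of a single shared id; a small sketch of that allocation pattern follows, with id_gen standing in for the stream id generator used in the diff.

# Per-host stream id allocation via get_next_mult; id_gen is a stand-in.
from typing import Dict, List

async def allocate_pokes(id_gen, hosts: List[str]) -> Dict[str, int]:
    async with id_gen.get_next_mult(len(hosts)) as stream_ids:
        # One (host, stream_id) pair per outbound poke row.
        return dict(zip(hosts, stream_ids))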
    async def add_remote_device_list_to_pending(

@@ -123,9 +123,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
        if stream_name == DeviceListsStream.NAME:
            for row in rows:
                assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
                if not row.hosts_calculated:
                if row.entity.startswith("@"):
                    self._get_e2e_device_keys_for_federation_query_inner.invalidate(
                        (row.user_id,)
                        (row.entity,)
                    )

        super().process_replication_rows(stream_name, instance_name, token, rows)
Some files were not shown because too many files have changed in this diff.