Compare commits


2 Commits

Author          SHA1         Message                                  Date
Erik Johnston   cfa5eca168   Newsfile                                 2024-05-22 15:02:31 +01:00
Erik Johnston   56e6abfecc   Allow loading modules in Docker image    2024-05-22 15:01:09 +01:00

Commit message for 56e6abfecc:

    This is done by mounting a `/modules` directory and installing packages
    into that.
251 changed files with 4081 additions and 31174 deletions
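The "Allow loading modules in Docker image" commit message is terse; as a sketch of how an operator might use the mount it describes (assuming the image's entrypoint adds `/modules` to Synapse's Python path; the package name and image tag are illustrative, not from this diff):

    # Install a pluggable module into a host directory.
    # "synapse-example-module" is a placeholder, not a real package.
    mkdir -p ./synapse-modules
    pip install --target ./synapse-modules synapse-example-module

    # Mount that directory at /modules when starting the container so the
    # entrypoint can pick the packages up. Adjust the image name and other
    # volumes to match your deployment.
    docker run -d \
        -v "$(pwd)/synapse-modules:/modules" \
        -v "$(pwd)/synapse-data:/data" \
        matrixdotorg/synapse:latest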

View File

@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
-**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
+**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/

View File

@@ -7,7 +7,7 @@ body:
       **THIS IS NOT A SUPPORT CHANNEL!**
       **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
-      If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
+      If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
       This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.

View File

@@ -72,7 +72,7 @@ jobs:
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@v5
         with:
           push: true
           labels: |

View File

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
+        uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}

View File

@@ -102,7 +102,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-12]
+        os: [ubuntu-20.04, macos-11]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,9 +112,9 @@ jobs:
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-12"
+            os: "macos-11"
           # Don't build aarch64 wheels on mac.
-          - os: "macos-12"
+          - os: "macos-11"
             arch: aarch64
           # Don't build aarch64 wheels on PR CI.
           - is_pr: true
@@ -130,7 +130,7 @@ jobs:
           python-version: "3.x"
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.19.1
+        run: python -m pip install cibuildwheel==2.16.2
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'

View File

@@ -21,7 +21,6 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
-      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter
@@ -74,9 +73,6 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
-            linting_readme:
-              - 'README.rst'
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes
@@ -139,7 +135,7 @@ jobs:
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff check --quiet .
+        run: poetry run ruff --quiet .
   lint-mypy:
     runs-on: ubuntu-latest
@@ -273,20 +269,6 @@ jobs:
       - run: cargo fmt --check
-  # This is to detect issues with the rst file, which can otherwise cause issues
-  # when uploading packages to PyPi.
-  lint-readme:
-    runs-on: ubuntu-latest
-    needs: changes
-    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.x"
-      - run: "pip install rstcheck"
-      - run: "rstcheck --report-level=WARNING README.rst"
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
@@ -302,10 +284,9 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
-      - lint-readme
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v3
+      - uses: matrix-org/done-action@v2
         with:
           needs: ${{ toJSON(needs) }}
@@ -320,7 +301,6 @@ jobs:
           lint-clippy
           lint-clippy-nightly
           lint-rustfmt
-          lint-readme
   calculate-test-jobs:
@@ -499,9 +479,6 @@ jobs:
     volumes:
       - ${{ github.workspace }}:/src
     env:
-      # If this is a pull request to a release branch, use that branch as default branch for sytest, else use develop
-      # This works because the release script always create a branch on the sytest repo with the same name as the release branch
-      SYTEST_DEFAULT_BRANCH: ${{ startsWith(github.base_ref, 'release-') && github.base_ref || 'develop' }}
       SYTEST_BRANCH: ${{ github.head_ref }}
       POSTGRES: ${{ matrix.job.postgres && 1}}
       MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
@@ -737,7 +714,7 @@ jobs:
       - linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v3
+      - uses: matrix-org/done-action@v2
       with:
         needs: ${{ toJSON(needs) }}

View File

@@ -1,433 +1,3 @@
# Synapse 1.113.0rc1 (2024-08-06)
### Features
- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447))
- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477))
- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489))
- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505))
### Bugfixes
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450))
- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499))
### Improved Documentation
- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476))
### Internal Changes
- Change sliding sync to use their own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452))
- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478))
- Do not send down empty room entries down experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479))
- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482))
- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501))
- Split and move Sliding Sync tests so we have some more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504))
- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507))
- Easier to understand `timeline` assertions in Sliding Sync tests. ([\#17511](https://github.com/element-hq/synapse/issues/17511))
- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529))
### Updates to locked dependencies
* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495))
* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522))
* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521))
* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494))
* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493))
* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525))
* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523))
* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496))
* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497))
# Synapse 1.112.0 (2024-07-30)
This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.
This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
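For a pip-based installation, the upgrade described in the note above amounts to something like the following, run inside the virtualenv that hosts Synapse:

    # Upgrade Synapse itself.
    pip install -U matrix-synapse

    # Twisted 24.7.0rc1 is only a release candidate, so pip will not select it
    # automatically (even with --upgrade-strategy=eager); pin it explicitly.
    pip install Twisted==24.7.0rc1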
### Internal Changes
- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
# Synapse 1.112.0rc1 (2024-07-23)
Please note that this release candidate does not include the security dependency update
included in version 1.111.1 as this version was released before 1.111.1.
The same security fix can be found in the full release of 1.112.0.
### Features
- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))
### Bugfixes
- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))
### Improved Documentation
- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))
### Internal Changes
- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))
### Updates to locked dependencies
* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))
# Synapse 1.111.1 (2024-07-30)
This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).
This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.
With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.
Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.
**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
### Internal Changes
- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))
# Synapse 1.111.0 (2024-07-16)
No significant changes since 1.111.0rc2.
# Synapse 1.111.0rc2 (2024-07-10)
### Bugfixes
- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420))
### Improved Documentation
- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421))
### Internal Changes
- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422))
# Synapse 1.111.0rc1 (2024-07-09)
### Features
- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320))
- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337))
- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the
remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388))
- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395))
- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400))
- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403))
- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406))
### Bugfixes
- Fix rare race which caused no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398))
### Improved Documentation
- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356))
- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379))
- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399))
### Internal Changes
- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318))
- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367))
- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411))
- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390))
- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410))
- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393))
### Updates to locked dependencies
* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404))
* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382))
* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413))
* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384))
* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414))
* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412))
* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415))
* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381))
* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409))
* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408))
* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380))
# Synapse 1.110.0 (2024-07-03)
No significant changes since 1.110.0rc3.
# Synapse 1.110.0rc3 (2024-07-02)
### Bugfixes
- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))
### Internal Changes
- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))
# Synapse 1.110.0rc2 (2024-06-26)
### Internal Changes
- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
# Synapse 1.110.0rc1 (2024-06-26)
### Features
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
- `register_new_matrix_user` now supports a --password-file flag, which
is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294))
- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
- Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
### Bugfixes
- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
- Fix edge case in `/sync` returning the wrong the state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))
### Improved Documentation
- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
- Update header in the README to visually fix the the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))
### Internal Changes
- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))
### Updates to locked dependencies
* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))
# Synapse 1.109.0 (2024-06-18)
### Internal Changes
- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))
# Synapse 1.109.0rc3 (2024-06-17)
### Bugfixes
- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309))
### Internal Changes
- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306))
# Synapse 1.109.0rc2 (2024-06-11)
### Bugfixes
- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
# Synapse 1.109.0rc1 (2024-06-04)
### Features
- Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))
### Bugfixes
- Fix deduplicating of membership events to not create unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
- Fix bug where duplicate events could be sent down sync when using workers that are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
- Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
- Fix bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))
### Improved Documentation
- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))
### Internal Changes
- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
- Log exceptions when failing to auto-join new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
- Reduce work of calculating outbound device lists updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
- Improve performance of calculating device lists changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
- Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
- Clean out invalid destinations from `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
- Stop logging errors when receiving invalid User IDs in key querys requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))
### Updates to locked dependencies
* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))
# Synapse 1.108.0 (2024-05-28)
No significant changes since 1.108.0rc1.
# Synapse 1.108.0rc1 (2024-05-21)
### Features

Cargo.lock (generated; 33 changed lines)
View File

@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 [[package]]
 name = "bytes"
-version = "1.7.1"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"

 [[package]]
 name = "cfg-if"
@@ -212,9 +212,9 @@ dependencies = [
 [[package]]
 name = "lazy_static"
-version = "1.5.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

 [[package]]
 name = "libc"
@@ -234,9 +234,9 @@ dependencies = [
 [[package]]
 name = "log"
-version = "0.4.22"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"

 [[package]]
 name = "memchr"
@@ -444,9 +444,9 @@ dependencies = [
 [[package]]
 name = "regex"
-version = "1.10.6"
+version = "1.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 [[package]]
 name = "serde"
-version = "1.0.204"
+version = "1.0.202"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
+checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.204"
+version = "1.0.202"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
+checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,12 +505,11 @@ dependencies = [
 [[package]]
 name = "serde_json"
-version = "1.0.122"
+version = "1.0.117"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
+checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
 dependencies = [
  "itoa",
- "memchr",
  "ryu",
  "serde",
 ]
@@ -598,9 +597,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 [[package]]
 name = "ulid"
-version = "1.1.3"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
+checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
 dependencies = [
  "getrandom",
  "rand",

View File

@@ -1,34 +1,21 @@
-.. image:: ./docs/element_logo_white_bg.svg
-   :height: 60px
-
-**Element Synapse - Matrix homeserver implementation**
-
-|support| |development| |documentation| |license| |pypi| |python|
-
-Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
-implementation, written and maintained by `Element <https://element.io>`_.
-`Matrix <https://github.com/matrix-org>`__ is the open standard for
-secure and interoperable real time communications. You can directly run
-and manage the source code in this repository, available under an AGPL
-license. There is no support provided from Element unless you have a
-subscription.
-
-Subscription alternative
-========================
-
-Alternatively, for those that need an enterprise-ready solution, Element
-Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
-ESS builds on Synapse to offer a complete Matrix-based backend including the full
-`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
-giving admins the power to easily manage an organization-wide
-deployment. It includes advanced identity management, auditing,
-moderation and data retention options as well as Long Term Support and
-SLAs. ESS can be used to support any Matrix-based frontend client.
+=========================================================================
+Synapse |support| |development| |documentation| |license| |pypi| |python|
+=========================================================================
+
+Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
+maintained by the Matrix.org Foundation. We began rapid development in 2014,
+reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
+in earnest today.
+
+Briefly, Matrix is an open standard for communications on the internet, supporting
+federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
+Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
+<https://spec.matrix.org/>`_ describes the technical details.

 .. contents::

-🛠️ Installing and configuration
-===============================
+Installing and configuration
+============================

 The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
 `Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
@@ -118,8 +105,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
 impact to other applications will be minimal.

-🧪 Testing a new installation
-=============================
+Testing a new installation
+==========================

 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.
@@ -172,20 +159,8 @@ the form of::
 As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.

-🎯 Troubleshooting and support
-==============================
+Troubleshooting and support
+===========================

-🚀 Professional support
------------------------
-
-Enterprise quality support for Synapse including SLAs is available as part of an
-`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
-
-If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
-and access the `knowledge base <https://ems-docs.element.io>`_.
-
-🤝 Community support
---------------------
-
 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see
@@ -201,8 +176,8 @@ issues for support requests, only for bug reports and feature requests.
 .. |docs| replace:: ``docs``
 .. _docs: docs

-🪪 Identity Servers
-===================
+Identity Servers
+================

 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
@@ -231,8 +206,8 @@ an email address with your account, or send an invite to another user via their
 email address.

-🛠️ Development
-==============
+Development
+===========

 We welcome contributions to Synapse from the community!
 The best place to get started is our
@@ -250,8 +225,8 @@ Alongside all that, join our developer community on Matrix:
 `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!

-.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
-   :alt: (get community support in #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
+   :alt: (get support on #synapse:matrix.org)
    :target: https://matrix.to/#/#synapse:matrix.org

 .. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix

View File

@@ -0,0 +1 @@
Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details.

changelog.d/17204.doc (new file)
View File

@@ -0,0 +1 @@
Update OIDC documentation: by default Matrix doesn't query userinfo endpoint, then claims should be put on id_token.

changelog.d/17211.misc (new file)
View File

@@ -0,0 +1 @@
Reduce work of calculating outbound device lists updates.

changelog.d/17216.misc (new file)
View File

@@ -0,0 +1 @@
Improve performance of calculating device lists changes in `/sync`.

changelog.d/17228.docker (new file)
View File

@@ -0,0 +1 @@
Support loading pluggable modules from Docker images.

View File

@@ -1 +0,0 @@
Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View File

@@ -1,3 +0,0 @@
Clarify default behaviour of the
[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites)
option.

View File

@@ -1 +0,0 @@
Fixup comment in sliding sync implementation.

View File

@@ -1 +0,0 @@
Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`.

97
debian/changelog vendored
View File

@@ -1,100 +1,3 @@
matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium
* New Synapse release 1.113.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Aug 2024 12:23:23 +0100
matrix-synapse-py3 (1.112.0) stable; urgency=medium
* New Synapse release 1.112.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Jul 2024 17:15:48 +0100
matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
* New Synapse release 1.112.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 23 Jul 2024 08:58:55 -0600
matrix-synapse-py3 (1.111.1) stable; urgency=medium
* New Synapse release 1.111.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 Jul 2024 16:13:52 +0100
matrix-synapse-py3 (1.111.0) stable; urgency=medium
* New Synapse release 1.111.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Jul 2024 12:42:46 +0200
matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium
* New synapse release 1.111.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 10 Jul 2024 08:46:54 +0000
matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
* New synapse release 1.111.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 Jul 2024 09:49:25 +0000
matrix-synapse-py3 (1.110.0) stable; urgency=medium
* New Synapse release 1.110.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 03 Jul 2024 09:08:59 -0600
matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
* New Synapse release 1.110.0rc3.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Jul 2024 08:28:56 -0600
matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
* New Synapse release 1.110.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Jun 2024 18:14:48 +0200
matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
* `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
* New Synapse release 1.110.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Jun 2024 14:07:56 +0200
matrix-synapse-py3 (1.109.0) stable; urgency=medium
* New synapse release 1.109.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jun 2024 09:45:15 +0000
matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
* New synapse release 1.109.0rc3.
-- Synapse Packaging team <packages@matrix.org> Mon, 17 Jun 2024 12:05:24 +0000
matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
* New synapse release 1.109.0rc2.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jun 2024 13:20:17 +0000
matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
* New Synapse release 1.109.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Jun 2024 09:42:46 +0100
matrix-synapse-py3 (1.108.0) stable; urgency=medium
* New Synapse release 1.108.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 May 2024 11:54:22 +0100
matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium
* New Synapse release 1.108.0rc1.

View File

@@ -31,12 +31,8 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
Local part of the new user. Will prompt if omitted.
* `-p`, `--password`:
New password for user. Will prompt if this option and `--password-file` are omitted.
Supplying the password on the command line is not recommended.
New password for user. Will prompt if omitted. Supplying the password
on the command line is not recommended. Use STDIN instead.
* `--password-file`:
File containing the new password for user. If set, overrides `--password`.
This is a more secure alternative to specifying the password on the command line.
* `-a`, `--admin`:
Register new user as an admin. Will prompt if omitted.
@@ -48,9 +44,6 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
Shared secret as defined in server config file. This is an optional
parameter as it can also be supplied via the YAML file.
* `--exists-ok`:
Do not fail if the user already exists. The user account will not be updated in this case.
* `server_url`:
URL of the home server. Defaults to 'https://localhost:8448'.

2
debian/templates vendored
View File

@@ -5,7 +5,7 @@ _Description: Name of the server:
servers via federation. This is normally the public hostname of the
server running synapse, but can be different if you set up delegation.
Please refer to the delegation documentation in this case:
https://element-hq.github.io/synapse/latest/delegate.html. https://github.com/element-hq/synapse/blob/master/docs/delegate.md.
Template: matrix-synapse/report-stats
Type: boolean

View File

@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
###
# We hardcode the use of Debian bookworm here because this could change upstream
# and other Dockerfiles used for testing are expecting bookworm.
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
# install the OS build deps
RUN \

View File

@@ -24,7 +24,7 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)
FROM docker.io/library/${distro} AS builder FROM docker.io/library/${distro} as builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
@@ -73,8 +73,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
curl \
debhelper \
devscripts \
# Required for building cffi from source.
libffi-dev \
libsystemd-dev \
lsb-release \
pkg-config \

View File

@@ -243,3 +243,26 @@ healthcheck:
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[Admin FAQ](https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
## Modules
Synapse supports loading additional modules, using the
[`modules`](https://element-hq.github.io/synapse/latest/modules/index.html)
config. Synapse will look for these modules in `/modules`.
To install a package, simply run:
```
pip install --target <module_directory> <package>
```
Where `<module_directory>` is the directory mounted to `/modules`, and
`<package>` is either the package name or a path to the package. See
`pip install` for more details.
**Note**: Packages already installed as part of Synapse cannot be overridden by
different versions of the package in `/modules`, e.g. if the Synapse version
uses Twisted 24.3.0 then installing Twisted 23.10.0 in `/modules` won't have any
effect. This can cause issues if the required version of a package is different
between Synapse and the module being installed.
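As an illustration, a minimal module that could live in `/modules` might look like the following sketch; the file name, class name and config values here are hypothetical, only the constructor shape follows the Synapse modules documentation linked above.

```python
# /modules/example_module.py (hypothetical), referenced from homeserver.yaml as
#   modules:
#     - module: example_module.ExampleModule
#       config: {greeting: "hello"}
from synapse.module_api import ModuleApi


class ExampleModule:
    def __init__(self, config: dict, api: ModuleApi) -> None:
        # `config` is the dict under the `config` key in homeserver.yaml.
        # Callbacks (spam checkers, third-party rules, ...) would be
        # registered against `api` here.
        self.greeting = config.get("greeting", "hello")
        self.api = api
```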

View File

@@ -11,9 +11,6 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
cp -aT /synapse/source /synapse/build
cd /synapse/build
# Delete any existing `.so` files to ensure a clean build.
rm -f /synapse/build/synapse/*.so
# if this is a prerelease, set the Section accordingly.
#
# When the package is later added to the package repo, reprepro will use the

View File

@@ -105,6 +105,8 @@ experimental_features:
# Expose a room summary for public rooms
msc3266_enabled: true
msc4115_membership_on_events: true
server_notices:
  system_mxid_localpart: _server
  system_mxid_display_name: "Server Alert"

View File

@@ -176,6 +176,7 @@ app_service_config_files:
{% endif %}
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False
## Signing Keys ##

View File

@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
},
"media_repository": {
"app": "synapse.app.generic_worker",
"listener_resources": ["media", "client"], "listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
"^/_synapse/admin/v1/purge_media_cache$",
@@ -125,8 +125,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_synapse/admin/v1/user/.*/media.*$",
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
"^/_matrix/client/v1/media/.*$",
"^/_matrix/federation/v1/media/.*$",
],
# The first configured media worker will run the media background jobs
"shared_extra_conf": {

View File

@@ -269,6 +269,15 @@ running with 'migrate_config'. See the README for more details.
args += ["--config-path", config_path] args += ["--config-path", config_path]
# Add the `/modules` directly to python search path, which allows users to
# add custom modules.
#
# We want to add the directory *last* so that nothing can overwrite the
# existing package versions. Therefore we load the current path and append
# `/modules` to that
path = ":".join(sys.path)
environ["PYTHONPATH"] = f"{path}:/modules"
log("Starting synapse with args " + " ".join(args)) log("Starting synapse with args " + " ".join(args))
args = [sys.executable] + args args = [sys.executable] + args
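For illustration (this snippet is not part of the image), the ordering guarantee the comment above relies on is simply how Python resolves imports: `sys.path` is walked front to back, so appending `/modules` last means packages bundled with Synapse always shadow same-named packages in `/modules`.

```python
# Appending /modules last reproduces what PYTHONPATH=<existing>:/modules does:
# the bundled site-packages copy of a package is found before any copy that
# was pip-installed into /modules, so bundled versions cannot be overridden.
import sys

sys.path.append("/modules")

import twisted  # assuming Twisted is installed: resolved from site-packages

print(twisted.__file__)  # points into site-packages, not /modules
```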

View File

@@ -1,17 +1,21 @@
# Experimental Features API
This API allows a server administrator to enable or disable some experimental features on a per-user
basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications - [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
for another client presence state enabled
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
UIA when first uploading cross-signing keys.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
## Enabling/Disabling Features
This API allows a server administrator to enable experimental features for a given user. The request must
provide a body containing the user id and listing the features to enable/disable in the following format:
```json
{
@@ -31,7 +35,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
```
## Listing Enabled Features
To list which features are enabled/disabled for a given user send a request to the following API:
```
@@ -48,4 +52,4 @@ user like so:
"msc3967": false
}
}
```
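For example, toggling features for one user could look like the following sketch; the homeserver URL, access token and user ID are placeholders, while the endpoint and body shape come from the documentation above.

```python
# Placeholder values throughout; only the endpoint and body shape are
# taken from the docs above.
import requests

resp = requests.put(
    "https://synapse.example.com/_synapse/admin/v1/experimental_features/@user:example.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"features": {"msc3881": True, "msc3967": False}},
)
resp.raise_for_status()
print(resp.json())
```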

View File

@@ -36,10 +36,6 @@ The following query parameters are available:
- the room's name,
- the local part of the room's canonical alias, or
- the complete (local and server part) room's id (case sensitive).
* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.
Defaults to no filtering.

View File

@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
> The security levels of Florbs are now validated when received
> via the `/federation/florb` endpoint. Contributed by Jane Matrix.
If there are multiple pull requests involved in a single bugfix/feature/etc, then the
content for each `changelog.d` file and file extension should be the same. Towncrier
will merge the matching files together into a single changelog entry when we come to
release.
If there are multiple pull requests involved in a single bugfix/feature/etc,
then the content for each `changelog.d` file should be the same. Towncrier will
merge the matching files together into a single changelog entry when we come to
release.
### How do I know what to call the changelog file before I create the PR?

View File

@@ -1,94 +0,0 @@
(Deleted image: `element_logo_white_bg.svg`, the Element logo in SVG form, 7.5 KiB; source omitted.)

View File

@@ -242,11 +242,12 @@ host all all ::1/128 ident
### Fixing incorrect `COLLATE` or `CTYPE`
Synapse will refuse to start when using a database with incorrect values of
`COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
`database` section of the config, is set to true. Using different locales can
cause issues if the locale library is updated from underneath the database, or
if a different version of the locale is used on any replicas.
Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.
If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
@@ -255,3 +256,13 @@ however extreme care must be taken to avoid database corruption.
Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.
### Fixing inconsistent sequences error
Synapse uses Postgres sequences to generate IDs for various tables. A sequence
and associated table can get out of sync if, for example, Synapse has been
downgraded and then upgraded again.
To fix the issue shut down Synapse (including any and all workers) and run the
SQL command included in the error message. Once done Synapse should start
successfully.
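As a rough sketch of the kind of statement involved (the authoritative SQL is whatever the error message prints; the sequence, table and column names below are taken from the porter script later in this diff, and the DSN is a placeholder):

```python
# Illustration only: realign a Postgres sequence with the maximum stream ID
# in its backing table. Use the exact statement from Synapse's error message.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")  # placeholder DSN
with conn, conn.cursor() as cur:
    cur.execute(
        "SELECT setval('receipts_sequence', "
        "(SELECT GREATEST(MAX(stream_id), 1) FROM receipts_linearized))"
    )
conn.close()
```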

View File

@@ -67,7 +67,7 @@ in Synapse can be deactivated.
**NOTE**: This has an impact on security and is for testing purposes only!
To deactivate the certificate validation, the following setting must be added to
your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).
```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true

View File

@@ -309,62 +309,7 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
python3-virtualenv libffi-devel openssl-devel python3-devel \
libicu-devel
sudo dnf group install "Development Tools" sudo dnf groupinstall "Development Tools"
```
##### Red Hat Enterprise Linux / Rocky Linux
*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*
It's recommended to use the latest Python versions.
RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ship with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them.
Python 3.11 and 3.12 are available for both RHEL 8 and 9.
These commands should be run as root user.
RHEL 8
```bash
# Enable PowerTools repository
dnf config-manager --set-enabled powertools
```
RHEL 9
```bash
# Enable CodeReady Linux Builder repository
crb enable
```
Install new version of Python. You only need one of these:
```bash
# Python 3.11
dnf install python3.11 python3.11-devel
```
```bash
# Python 3.12
dnf install python3.12 python3.12-devel
```
Finally, install common prerequisites
```bash
dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
dnf group install "Development Tools"
```
###### Using venv module instead of virtualenv command
It's recommended to use Python venv module directly rather than the virtualenv command.
* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.
Here's an example of creating Python 3.12 virtual environment and installing Synapse from PyPI.
```bash
mkdir -p ~/synapse
# To use Python 3.11, simply use the command "python3.11" instead.
python3.12 -m venv ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```
##### macOS

View File

@@ -117,20 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.111.0
## New worker endpoints for authenticated client and federation media
[Media repository workers](./workers.md#synapseappmedia_repository) handling
Media APIs can now handle the following endpoint patterns:
```
^/_matrix/client/v1/media/.*$
^/_matrix/federation/v1/media/.*$
```
Please update your reverse proxy configuration.
# Upgrading to v1.106.0
## Minimum supported Rust version

View File

@@ -246,7 +246,6 @@ Example configuration:
```yaml
presence:
  enabled: false
  include_offline_users_on_sync: false
```
`enabled` can also be set to a special value of "untracked" which ignores updates `enabled` can also be set to a special value of "untracked" which ignores updates
@@ -255,10 +254,6 @@ received via clients and federation, while still accepting updates from the
*The "untracked" option was added in Synapse 1.96.0.* *The "untracked" option was added in Synapse 1.96.0.*
When clients perform an initial or `full_state` sync, presence results for offline users are
not included by default. Setting `include_offline_users_on_sync` to `true` will always include
offline users in the results. Defaults to false.
--- ---
### `require_auth_for_profile_requests` ### `require_auth_for_profile_requests`
@@ -1764,9 +1759,8 @@ rc_3pid_validation:
### `rc_invites`
This option sets ratelimiting how often invites can be sent in a room or to a
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
defaults to `per_second: 0.3`, `burst_count: 10`.
This option sets ratelimiting how often invites can be sent in a room or to a
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
Client requests that invite user(s) when [creating a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
@@ -1868,18 +1862,6 @@ federation_rr_transactions_per_room_per_second: 40
## Media Store
Config options related to Synapse's media store.
---
### `enable_authenticated_media`
When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
Example configuration:
```yaml
enable_authenticated_media: true
```
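A client-side sketch of the behaviour described above (placeholder homeserver and media ID; the endpoint paths are the ones named in this option's description):

```python
# With enable_authenticated_media: true, newly uploaded media is only served
# from the authenticated endpoint; the legacy endpoint returns 404 for it.
import requests

BASE = "https://synapse.example.com"  # placeholder homeserver
SERVER, MEDIA_ID = "example.com", "abcDEF123"  # placeholder mxc:// parts

legacy = requests.get(f"{BASE}/_matrix/media/v3/download/{SERVER}/{MEDIA_ID}")
authed = requests.get(
    f"{BASE}/_matrix/client/v1/media/download/{SERVER}/{MEDIA_ID}",
    headers={"Authorization": "Bearer <access_token>"},
)
print(legacy.status_code, authed.status_code)  # e.g. 404 200 for new uploads
```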
---
### `enable_media_repo`
@@ -1964,24 +1946,6 @@ Example configuration:
max_image_pixels: 35M
```
---
### `remote_media_download_burst_count`
Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.
Example configuration:
```yaml
remote_media_download_burst_count: 200M
```
---
### `remote_media_download_per_second`
Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads - this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached it's capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`.
Example configuration:
```yaml
remote_media_download_per_second: 40K
```
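A toy model of the leaky-bucket arithmetic described above (illustrative only, not Synapse's implementation):

```python
# Bytes drain from the bucket at `per_second`; a download is refused once
# the bucket would exceed `burst_count` bytes.
import time


class LeakyBucket:
    def __init__(self, burst_count: float, per_second: float) -> None:
        self.capacity = burst_count
        self.rate = per_second
        self.level = 0.0
        self.last = time.monotonic()

    def allow(self, nbytes: float) -> bool:
        now = time.monotonic()
        # Drain whatever has leaked out since the last request.
        self.level = max(0.0, self.level - (now - self.last) * self.rate)
        self.last = now
        if self.level + nbytes > self.capacity:
            return False
        self.level += nbytes
        return True


bucket = LeakyBucket(burst_count=500 * 2**20, per_second=87 * 2**10)
print(bucket.allow(10 * 2**20))  # True: well within the 500 MiB burst
```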
---
### `prevent_media_downloads_from`
A list of domains to never download media from. Media from these
@@ -1993,10 +1957,9 @@ This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.
This will have no effect on media originating from the local server. This only
affects media downloaded from other Matrix servers, to control URL previews see
[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
[`url_preview_url_blacklist`](#url_preview_url_blacklist).
This will have no effect on media originating from the local server.
This only affects media downloaded from other Matrix servers, to
block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
Defaults to an empty list (nothing blocked).
@@ -2148,14 +2111,12 @@ url_preview_ip_range_whitelist:
---
### `url_preview_url_blacklist`
Optional list of URL matches that the URL preview spider is denied from
accessing. This is a usability feature, not a security one. You should use
`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
define a public DNS entry that points to a private IP address and circumvent
the blacklist. Applications that perform redirects or serve different content
when detecting that Synapse is accessing them can also bypass the blacklist.
This is more useful if you know there is an entire shape of URL that you know
that you do not want Synapse to preview.
Optional list of URL matches that the URL preview spider is
denied from accessing. You should use `url_preview_ip_range_blacklist`
in preference to this, otherwise someone could define a public DNS
entry that points to a private IP address and circumvent the blacklist.
This is more useful if you know there is an entire shape of URL that
you know that will never want synapse to try to spider.
Each list entry is a dictionary of url component attributes as returned
by urlparse.urlsplit as applied to the absolute form of the URL. See
@@ -2386,7 +2347,7 @@ enable_registration_without_verification: true
---
### `registrations_require_3pid`
If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account. If this is set, users must provide all of the specified types of 3PID when registering an account.
Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
@@ -2411,9 +2372,6 @@ disable_msisdn_registration: true
Mandate that users are only allowed to associate certain formats of
3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re).
More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types).
Example configuration:
```yaml
@@ -2423,7 +2381,7 @@ allowed_local_3pids:
- medium: email
pattern: '^[^@]+@vector\.im$'
- medium: msisdn
pattern: '^44\d{10}$' pattern: '\+44'
```
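A quick sketch of how the two `msisdn` patterns above differ; anchored matching via `re.match` is used here for demonstration, and the exact matching semantics Synapse applies are an assumption:

```python
import re

anchored = r"^44\d{10}$"  # exactly "44" followed by ten digits
prefix = r"\+44"          # any address beginning with "+44"

print(bool(re.match(prefix, "+447700900123")))    # True
print(bool(re.match(anchored, "447700900123")))   # True
print(bool(re.match(anchored, "+447700900123")))  # False: leading "+" rejected
```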
--- ---
### `enable_3pid_lookup`
@@ -2742,7 +2700,7 @@ Example configuration:
session_lifetime: 24h
```
---
### `refreshable_access_token_lifetime` ### `refresh_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
@@ -3830,8 +3788,7 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
empty responses are returned to all queries. Defaults to true.
* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
If set to true, will return all users known to the homeserver matching the search query.
* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
@@ -4154,38 +4111,6 @@ default_power_level_content_override:
trusted_private_chat: null
public_chat: null
```
The default power levels for each preset are:
```yaml
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
```
So a complete example where the default power-levels for a preset are maintained
but the power level for a new key is set is:
```yaml
default_power_level_content_override:
private_chat:
events:
"com.example.foo": 0
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
trusted_private_chat: null
public_chat: null
```
---
### `forget_rooms_on_leave`
@@ -4207,7 +4132,7 @@ By default, no room is excluded.
Example configuration:
```yaml
exclude_rooms_from_sync:
- "!foo:example.com" - !foo:example.com
```
--- ---
@@ -4685,9 +4610,7 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match * `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
the "worker_name". If not set or `null`, invites will be accepted on the
main process.
NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join

View File

@@ -62,6 +62,6 @@ following documentation:
## Reporting a security vulnerability
If you've found a security issue in Synapse or any other Element project,
please report it to us in accordance with our [Security Disclosure
Policy](https://element.io/security/security-disclosure-policy). Thank you!
If you've found a security issue in Synapse or any other Matrix.org Foundation
project, please report it to us in accordance with our [Security Disclosure
Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!

View File

@@ -739,8 +739,6 @@ An example for a federation sender instance:
Handles the media repository. It can handle all endpoints starting with:
/_matrix/media/
/_matrix/client/v1/media/
/_matrix/federation/v1/media/
... and the following regular expressions matching media-specific administration APIs:

View File

@@ -96,6 +96,3 @@ ignore_missing_imports = True
# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True
[mypy-multipart.*]
ignore_missing_imports = True

1698
poetry.lock generated

File diff suppressed because it is too large.

View File

@@ -43,7 +43,6 @@ target-version = ['py38', 'py39', 'py310', 'py311']
[tool.ruff]
line-length = 88
[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
# for error codes. The ones we ignore are:
# E501: Line too long (black enforces this for us)
@@ -97,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.113.0rc1" version = "1.108.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -201,8 +200,10 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `assert_never`, which was added in `typing-extensions` 4.1. # We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
typing-extensions = ">=4.1" # Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -225,8 +226,6 @@ pydantic = ">=1.7.4, <3"
# needed.
setuptools_rust = ">=1.3"
# This is used for parsing multipart responses
python-multipart = ">=0.0.9"
# Optional Dependencies
# ---------------------
@@ -322,7 +321,7 @@ all = [
# This helps prevent merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.5.5" ruff = "0.3.7"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

View File

@@ -204,8 +204,6 @@ pub struct EventInternalMetadata {
/// The stream ordering of this event. None, until it has been persisted.
#[pyo3(get, set)]
stream_ordering: Option<NonZeroI64>,
#[pyo3(get, set)]
instance_name: Option<String>,
/// whether this event is an outlier (ie, whether we have the state at that
/// point in the DAG)
@@ -234,7 +232,6 @@ impl EventInternalMetadata {
Ok(EventInternalMetadata {
data,
stream_ordering: None,
instance_name: None,
outlier: false,
})
}

View File

@@ -223,6 +223,7 @@ test_packages=(
./tests/msc3930
./tests/msc3902
./tests/msc3967
./tests/msc4115
)
# Enable dirty runs, so tests will reuse the same container where possible.

View File

@@ -43,7 +43,7 @@ import argparse
import base64
import json
import sys
from typing import Any, Dict, Mapping, Optional, Tuple, Union from typing import Any, Dict, Optional, Tuple
from urllib import parse as urlparse
import requests
@@ -75,7 +75,7 @@ def encode_canonical_json(value: object) -> bytes:
value,
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
ensure_ascii=False,
# Remove unnecessary white space.
separators=(",", ":"),
# Sort the keys of dictionaries.
sort_keys=True,
@@ -298,41 +298,12 @@ class MatrixConnectionAdapter(HTTPAdapter):
return super().send(request, *args, **kwargs)
# def get_connection( def get_connection(
# self, url: str, proxies: Optional[Dict[str, str]] = None, self, url: str, proxies: Optional[Dict[str, str]] = None
# ) -> HTTPConnectionPool:
# # overrides the get_connection() method in the base class
# parsed = urlparse.urlsplit(url)
# (host, port, ssl_server_name) = self._lookup(parsed.netloc)
# print(
# f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
# )
# return self.poolmanager.connection_from_host(
# host,
# port=port,
# scheme="https",
# pool_kwargs={"server_hostname": ssl_server_name},
# )
def get_connection_with_tls_context(
self,
request: PreparedRequest,
verify: Optional[Union[bool, str]],
proxies: Optional[Mapping[str, str]] = None,
cert: Optional[Union[Tuple[str, str], str]] = None,
) -> HTTPConnectionPool: ) -> HTTPConnectionPool:
# overrides the get_connection_with_tls_context() method in the base class
# return self.get_connection(request.url, proxies)
# overrides the get_connection() method in the base class # overrides the get_connection() method in the base class
parsed = urlparse.urlsplit(request.url) parsed = urlparse.urlsplit(url)
(host, port, ssl_server_name) = self._lookup(parsed.netloc)
# Extract the hostname from the request URL and ensure it's a str.
hostname = parsed.netloc
if isinstance(hostname, bytes):
hostname = hostname.decode("utf-8")
assert isinstance(hostname, str)
(host, port, ssl_server_name) = self._lookup(hostname)
print(
f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
)

View File

@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"
# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
ruff check --quiet --fix "${files[@]}" ruff --quiet --fix "${files[@]}"
# Catch any common programming mistakes in Rust code.
#

View File

@@ -70,7 +70,6 @@ def cli() -> None:
pip install -e .[dev]
- A checkout of the sytest repository at ../sytest
- A checkout of the complement repository at ../complement
Then to use:
@@ -113,12 +112,10 @@ def _prepare() -> None:
# Make sure we're in a git repo.
synapse_repo = get_repo_and_check_clean_checkout()
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")
click.secho("Updating Synapse and Sytest git repos...")
synapse_repo.remote().fetch()
sytest_repo.remote().fetch()
complement_repo.remote().fetch()
# Get the current version and AST from root Synapse module.
current_version = get_package_version()
@@ -211,15 +208,7 @@ def _prepare() -> None:
"Which branch should the release be based on?", default=default "Which branch should the release be based on?", default=default
) )
for repo_name, repo in { for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
"synapse": synapse_repo,
"sytest": sytest_repo,
"complement": complement_repo,
}.items():
# Special case for Complement: `develop` maps to `main`
if repo_name == "complement" and branch_name == "develop":
branch_name = "main"
base_branch = find_ref(repo, branch_name) base_branch = find_ref(repo, branch_name)
if not base_branch: if not base_branch:
print(f"Could not find base branch {branch_name} for {repo_name}!") print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -242,12 +231,6 @@ def _prepare() -> None:
if click.confirm("Push new SyTest branch?", default=True): if click.confirm("Push new SyTest branch?", default=True):
sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name) sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)
# Same for Complement
if click.confirm("Push new Complement branch?", default=True):
complement_repo.git.push(
"-u", complement_repo.remote().name, release_branch_name
)
# Switch to the release branch and ensure it's up to date. # Switch to the release branch and ensure it's up to date.
synapse_repo.git.checkout(release_branch_name) synapse_repo.git.checkout(release_branch_name)
update_branch(synapse_repo) update_branch(synapse_repo)
@@ -647,9 +630,6 @@ def _merge_back() -> None:
else:
# Full release
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
complement_repo = get_repo_and_check_clean_checkout(
"../complement", "complement"
)
if click.confirm(f"Merge {branch_name} → master?", default=True):
_merge_into(synapse_repo, branch_name, "master")
@@ -663,9 +643,6 @@ def _merge_back() -> None:
if click.confirm("On SyTest, merge master → develop?", default=True):
_merge_into(sytest_repo, "master", "develop")
if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
_merge_into(complement_repo, branch_name, "main")
@cli.command()
def announce() -> None:

View File

@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")
class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore DATASTORE_CLASS = DataStore  # type: ignore
def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
super().__init__(config.server.server_name, config=config)

View File

@@ -52,7 +52,6 @@ def request_registration(
user_type: Optional[str] = None,
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
exists_ok: bool = False,
) -> None:
url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)
@@ -98,10 +97,6 @@ def request_registration(
r = requests.post(url, json=data)
if r.status_code != 200:
response = r.json()
if exists_ok and response["errcode"] == "M_USER_IN_USE":
_print("User already exists. Skipping.")
return
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
if 400 <= r.status_code < 500:
try:
@@ -120,7 +115,6 @@ def register_new_user(
shared_secret: str,
admin: Optional[bool],
user_type: Optional[str],
exists_ok: bool = False,
) -> None:
if not user:
try:
@@ -160,13 +154,7 @@ def register_new_user(
admin = False
request_registration(
user, user, password, server_location, shared_secret, bool(admin), user_type
password,
server_location,
shared_secret,
bool(admin),
user_type,
exists_ok=exists_ok,
)
@@ -186,22 +174,10 @@ def main() -> None:
help="Local part of the new user. Will prompt if omitted.", help="Local part of the new user. Will prompt if omitted.",
) )
parser.add_argument( parser.add_argument(
"--exists-ok",
action="store_true",
help="Do not fail if user already exists.",
)
password_group = parser.add_mutually_exclusive_group()
password_group.add_argument(
"-p", "-p",
"--password", "--password",
default=None, default=None,
help="New password for user. Will prompt for a password if " help="New password for user. Will prompt if omitted.",
"this flag and `--password-file` are both omitted.",
)
password_group.add_argument(
"--password-file",
default=None,
help="File containing the new password for user. If set, will override `--password`.",
) )
parser.add_argument( parser.add_argument(
"-t", "-t",
@@ -209,7 +185,6 @@ def main() -> None:
default=None,
help="User type as specified in synapse.api.constants.UserTypes",
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
"-a",
@@ -272,11 +247,6 @@ def main() -> None:
print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr) print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1) sys.exit(1)
if args.password_file:
password = _read_file(args.password_file, "password-file").strip()
else:
password = args.password
if args.server_url: if args.server_url:
server_url = args.server_url server_url = args.server_url
elif config is not None: elif config is not None:
@@ -300,13 +270,7 @@ def main() -> None:
admin = args.admin admin = args.admin
register_new_user( register_new_user(
args.user, args.user, args.password, server_url, secret, admin, args.user_type
password,
server_url,
secret,
admin,
args.user_type,
exists_ok=args.exists_ok,
) )
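
The script above drives Synapse's shared-secret registration API. As a point of reference, here is a minimal standalone sketch of the same handshake (fetch a nonce, HMAC the user details against `registration_shared_secret`, POST the result); the server URL and credentials are placeholders:

```python
import hmac
import sys
from hashlib import sha1

import requests


def register_user(server: str, shared_secret: str, user: str,
                  password: str, admin: bool = False) -> None:
    """Register a user via Synapse's shared-secret admin API (sketch)."""
    url = f"{server.rstrip('/')}/_synapse/admin/v1/register"

    # Step 1: fetch a single-use nonce.
    nonce = requests.get(url).json()["nonce"]

    # Step 2: HMAC-SHA1 over nonce, user, password and admin flag,
    # NUL-separated, keyed with the registration_shared_secret.
    mac = hmac.new(shared_secret.encode("utf8"), digestmod=sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(user.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")

    # Step 3: submit the registration.
    r = requests.post(url, json={
        "nonce": nonce,
        "username": user,
        "password": password,
        "admin": admin,
        "mac": mac.hexdigest(),
    })
    if r.status_code != 200:
        print(f"ERROR! Received {r.status_code} {r.reason}", file=sys.stderr)
```

The NUL-separated HMAC input is what the server recomputes to authorise the request.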

View File

@@ -119,19 +119,18 @@ BOOLEAN_COLUMNS = {
"e2e_room_keys": ["is_verified"], "e2e_room_keys": ["is_verified"],
"event_edges": ["is_state"], "event_edges": ["is_state"],
"events": ["processed", "outlier", "contains_url"], "events": ["processed", "outlier", "contains_url"],
"local_media_repository": ["safe_from_quarantine", "authenticated"], "local_media_repository": ["safe_from_quarantine"],
"per_user_experimental_features": ["enabled"],
"presence_list": ["accepted"], "presence_list": ["accepted"],
"presence_stream": ["currently_active"], "presence_stream": ["currently_active"],
"public_room_list_stream": ["visibility"], "public_room_list_stream": ["visibility"],
"pushers": ["enabled"], "pushers": ["enabled"],
"redactions": ["have_censored"], "redactions": ["have_censored"],
"remote_media_cache": ["authenticated"],
"room_stats_state": ["is_federatable"], "room_stats_state": ["is_federatable"],
"rooms": ["is_public", "has_auth_chain_index"], "rooms": ["is_public", "has_auth_chain_index"],
"users": ["shadow_banned", "approved", "locked", "suspended"], "users": ["shadow_banned", "approved", "locked", "suspended"],
"un_partial_stated_event_stream": ["rejection_status_changed"], "un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"], "users_who_share_rooms": ["share_private"],
"per_user_experimental_features": ["enabled"],
} }
@@ -778,74 +777,22 @@ class Porter:
await self._setup_events_stream_seqs() await self._setup_events_stream_seqs()
await self._setup_sequence( await self._setup_sequence(
"un_partial_stated_event_stream_sequence", "un_partial_stated_event_stream_sequence",
[("un_partial_stated_event_stream", "stream_id")], ("un_partial_stated_event_stream",),
) )
await self._setup_sequence( await self._setup_sequence(
"device_inbox_sequence", "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
[
("device_inbox", "stream_id"),
("device_federation_outbox", "stream_id"),
],
) )
await self._setup_sequence( await self._setup_sequence(
"account_data_sequence", "account_data_sequence",
[ ("room_account_data", "room_tags_revisions", "account_data"),
("room_account_data", "stream_id"),
("room_tags_revisions", "stream_id"),
("account_data", "stream_id"),
],
)
await self._setup_sequence(
"receipts_sequence",
[
("receipts_linearized", "stream_id"),
],
)
await self._setup_sequence(
"presence_stream_sequence",
[
("presence_stream", "stream_id"),
],
) )
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
await self._setup_auth_chain_sequence() await self._setup_auth_chain_sequence()
await self._setup_sequence( await self._setup_sequence(
"application_services_txn_id_seq", "application_services_txn_id_seq",
[ ("application_services_txns",),
( "txn_id",
"application_services_txns",
"txn_id",
)
],
)
await self._setup_sequence(
"device_lists_sequence",
[
("device_lists_stream", "stream_id"),
("user_signature_stream", "stream_id"),
("device_lists_outbound_pokes", "stream_id"),
("device_lists_changes_in_room", "stream_id"),
("device_lists_remote_pending", "stream_id"),
("device_lists_changes_converted_stream_position", "stream_id"),
],
)
await self._setup_sequence(
"e2e_cross_signing_keys_sequence",
[
("e2e_cross_signing_keys", "stream_id"),
],
)
await self._setup_sequence(
"push_rules_stream_sequence",
[
("push_rules_stream", "stream_id"),
],
)
await self._setup_sequence(
"pushers_sequence",
[
("pushers", "id"),
("deleted_pushers", "stream_id"),
],
) )
# Step 3. Get tables. # Step 3. Get tables.
@@ -1154,11 +1101,12 @@ class Porter:
async def _setup_sequence( async def _setup_sequence(
self, self,
sequence_name: str, sequence_name: str,
stream_id_tables: Iterable[Tuple[str, str]], stream_id_tables: Iterable[str],
column_name: str = "stream_id",
) -> None: ) -> None:
"""Set a sequence to the correct value.""" """Set a sequence to the correct value."""
current_stream_ids = [] current_stream_ids = []
for stream_id_table, column_name in stream_id_tables: for stream_id_table in stream_id_tables:
max_stream_id = cast( max_stream_id = cast(
int, int,
await self.sqlite_store.db_pool.simple_select_one_onecol( await self.sqlite_store.db_pool.simple_select_one_onecol(
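
Conceptually, `_setup_sequence` just advances each Postgres sequence past the largest stream id already ported from SQLite. A rough sketch under that assumption, using the newer `(table, column)` pair signature; the cursor wiring is left to the caller:

```python
import sqlite3
from typing import Iterable, Tuple


def setup_sequence(
    sqlite_conn: sqlite3.Connection,
    pg_cur,  # any DB-API cursor connected to the target Postgres database
    sequence_name: str,
    stream_id_tables: Iterable[Tuple[str, str]],
) -> None:
    """Advance a Postgres sequence past every stream id already ported.

    Sketch of the idea behind Porter._setup_sequence: take the maximum of
    the named column across all source tables, then set the sequence so
    the next nextval() is strictly greater.
    """
    current_ids = []
    for table, column in stream_id_tables:
        # Table/column names come from a fixed, trusted list above, so
        # string interpolation is safe here.
        row = sqlite_conn.execute(
            f"SELECT COALESCE(MAX({column}), 0) FROM {table}"
        ).fetchone()
        current_ids.append(row[0])

    next_id = max(current_ids, default=0) + 1
    # setval(..., is_called=False) makes the *next* nextval() return next_id.
    pg_cur.execute("SELECT setval(%s, %s, false)", (sequence_name, next_id))
```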

View File

@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")
class MockHomeserver(HomeServer): class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore DATASTORE_CLASS = DataStore # type: ignore [assignment]
def __init__(self, config: HomeServerConfig): def __init__(self, config: HomeServerConfig):
super().__init__( super().__init__(

View File

@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited] # [This file includes modifications made by New Vector Limited]
# #
# #
from typing import TYPE_CHECKING, Optional, Tuple from typing import Optional, Tuple
from typing_extensions import Protocol from typing_extensions import Protocol
@@ -28,9 +28,6 @@ from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.types import Requester from synapse.types import Requester
if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature
# guests always get this device id. # guests always get this device id.
GUEST_DEVICE_ID = "guest_device" GUEST_DEVICE_ID = "guest_device"
@@ -90,19 +87,6 @@ class Auth(Protocol):
AuthError if access is denied for the user in the access token AuthError if access is denied for the user in the access token
""" """
async def get_user_by_req_experimental_feature(
self,
request: SynapseRequest,
feature: "ExperimentalFeature",
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
"""Like `get_user_by_req`, except also checks if the user has access to
the experimental feature. If they don't, returns a 404 unrecognized
request.
"""
async def validate_appservice_can_control_user_id( async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str self, app_service: ApplicationService, user_id: str
) -> None: ) -> None:

View File

@@ -28,7 +28,6 @@ from synapse.api.errors import (
Codes, Codes,
InvalidClientTokenError, InvalidClientTokenError,
MissingClientTokenError, MissingClientTokenError,
UnrecognizedRequestError,
) )
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.logging.opentracing import active_span, force_tracing, start_active_span
@@ -39,10 +38,8 @@ from . import GUEST_DEVICE_ID
from .base import BaseAuth from .base import BaseAuth
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.server import HomeServer from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -109,32 +106,6 @@ class InternalAuth(BaseAuth):
parent_span.set_tag("appservice_id", requester.app_service.id) parent_span.set_tag("appservice_id", requester.app_service.id)
return requester return requester
async def get_user_by_req_experimental_feature(
self,
request: SynapseRequest,
feature: "ExperimentalFeature",
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
try:
requester = await self.get_user_by_req(
request,
allow_guest=allow_guest,
allow_expired=allow_expired,
allow_locked=allow_locked,
)
if await self.store.is_feature_enabled(requester.user.to_string(), feature):
return requester
raise UnrecognizedRequestError(code=404)
except (AuthError, InvalidClientTokenError):
if feature.is_globally_enabled(self.hs.config):
# If it's globally enabled then return the auth error
raise
raise UnrecognizedRequestError(code=404)
@cancellable @cancellable
async def _wrapped_get_user_by_req( async def _wrapped_get_user_by_req(
self, self,

View File

@@ -40,7 +40,6 @@ from synapse.api.errors import (
OAuthInsufficientScopeError, OAuthInsufficientScopeError,
StoreError, StoreError,
SynapseError, SynapseError,
UnrecognizedRequestError,
) )
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable from synapse.logging.context import make_deferred_yieldable
@@ -49,7 +48,6 @@ from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.server import HomeServer from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -145,18 +143,6 @@ class MSC3861DelegatedAuth(BaseAuth):
# metadata.validate_introspection_endpoint() # metadata.validate_introspection_endpoint()
return metadata return metadata
async def _introspection_endpoint(self) -> str:
"""
Returns the introspection endpoint of the issuer
It uses the config option if set; otherwise it uses OIDC discovery to get it.
"""
if self._config.introspection_endpoint is not None:
return self._config.introspection_endpoint
metadata = await self._load_metadata()
return metadata.get("introspection_endpoint")
async def _introspect_token(self, token: str) -> IntrospectionToken: async def _introspect_token(self, token: str) -> IntrospectionToken:
""" """
Sends a token to the introspection endpoint and returns the introspection response Sends a token to the introspection endpoint and returns the introspection response
@@ -173,7 +159,8 @@ class MSC3861DelegatedAuth(BaseAuth):
Returns: Returns:
The introspection response The introspection response
""" """
introspection_endpoint = await self._introspection_endpoint() metadata = await self._issuer_metadata.get()
introspection_endpoint = metadata.get("introspection_endpoint")
raw_headers: Dict[str, str] = { raw_headers: Dict[str, str] = {
"Content-Type": "application/x-www-form-urlencoded", "Content-Type": "application/x-www-form-urlencoded",
"User-Agent": str(self._http_client.user_agent, "utf-8"), "User-Agent": str(self._http_client.user_agent, "utf-8"),
@@ -258,32 +245,6 @@ class MSC3861DelegatedAuth(BaseAuth):
return requester return requester
async def get_user_by_req_experimental_feature(
self,
request: SynapseRequest,
feature: "ExperimentalFeature",
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
try:
requester = await self.get_user_by_req(
request,
allow_guest=allow_guest,
allow_expired=allow_expired,
allow_locked=allow_locked,
)
if await self.store.is_feature_enabled(requester.user.to_string(), feature):
return requester
raise UnrecognizedRequestError(code=404)
except (AuthError, InvalidClientTokenError):
if feature.is_globally_enabled(self.hs.config):
# If it's globally enabled then return the auth error
raise
raise UnrecognizedRequestError(code=404)
async def get_user_by_access_token( async def get_user_by_access_token(
self, self,
token: str, token: str,
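
The `_introspect_token` path is plain RFC 7662 token introspection against the endpoint taken from the issuer's metadata (or, before this change, the `introspection_endpoint` config override). A hedged sketch using `requests` and basic client authentication; Synapse's real code uses its own HTTP client and the configured `client_auth_method`:

```python
import requests


def introspect_token(
    introspection_endpoint: str,
    token: str,
    client_id: str,
    client_secret: str,
) -> dict:
    """POST a token to an OAuth 2.0 introspection endpoint (RFC 7662).

    Sketch only: client_secret_basic auth is assumed here.
    """
    r = requests.post(
        introspection_endpoint,
        data={"token": token, "token_type_hint": "access_token"},
        auth=(client_id, client_secret),
        headers={"Accept": "application/json"},
    )
    r.raise_for_status()
    introspection = r.json()
    # An inactive token is reported as {"active": false}, not as an error.
    if not introspection.get("active"):
        raise ValueError("token is not active")
    return introspection
```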

View File

@@ -50,7 +50,7 @@ class Membership:
KNOCK: Final = "knock" KNOCK: Final = "knock"
LEAVE: Final = "leave" LEAVE: Final = "leave"
BAN: Final = "ban" BAN: Final = "ban"
LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN)) LIST: Final = (INVITE, JOIN, KNOCK, LEAVE, BAN)
class PresenceState: class PresenceState:
@@ -128,13 +128,9 @@ class EventTypes:
SpaceParent: Final = "m.space.parent" SpaceParent: Final = "m.space.parent"
Reaction: Final = "m.reaction" Reaction: Final = "m.reaction"
Sticker: Final = "m.sticker"
LiveLocationShareStart: Final = "m.beacon_info"
CallInvite: Final = "m.call.invite" CallInvite: Final = "m.call.invite"
PollStart: Final = "m.poll.start"
class ToDeviceEventTypes: class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request" RoomKeyRequest: Final = "m.room_key_request"
@@ -225,11 +221,6 @@ class EventContentFields:
# This is deprecated in MSC2175. # This is deprecated in MSC2175.
ROOM_CREATOR: Final = "creator" ROOM_CREATOR: Final = "creator"
# The version of the room for `m.room.create` events.
ROOM_VERSION: Final = "room_version"
ROOM_NAME: Final = "name"
# Used in m.room.guest_access events. # Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access" GUEST_ACCESS: Final = "guest_access"
@@ -242,15 +233,12 @@ class EventContentFields:
# an unspecced field added to to-device messages to identify them uniquely-ish # an unspecced field added to to-device messages to identify them uniquely-ish
TO_DEVICE_MSGID: Final = "org.matrix.msgid" TO_DEVICE_MSGID: Final = "org.matrix.msgid"
# `m.room.encryption`` algorithm field
ENCRYPTION_ALGORITHM: Final = "algorithm"
class EventUnsignedContentFields: class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events""" """Fields found inside the 'unsigned' data on events"""
# Requesting user's membership, per MSC4115 # Requesting user's membership, per MSC4115
MEMBERSHIP: Final = "membership" MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
class RoomTypes: class RoomTypes:

View File

@@ -128,10 +128,6 @@ class Codes(str, Enum):
# MSC2677 # MSC2677
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
# MSC3575 we are telling the client they need to expire their sliding sync
# connection.
UNKNOWN_POS = "M_UNKNOWN_POS"
class CodeMessageException(RuntimeError): class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers. """An exception with integer code, a message string attributes and optional headers.
@@ -851,17 +847,3 @@ class PartialStateConflictError(SynapseError):
msg=PartialStateConflictError.message(), msg=PartialStateConflictError.message(),
errcode=Codes.UNKNOWN, errcode=Codes.UNKNOWN,
) )
class SlidingSyncUnknownPosition(SynapseError):
"""An error that Synapse can return to signal to the client to expire their
sliding sync connection (i.e. send a new request without a `?since=`
param).
"""
def __init__(self) -> None:
super().__init__(
HTTPStatus.BAD_REQUEST,
msg="Unknown position",
errcode=Codes.UNKNOWN_POS,
)
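
From the client's side, `M_UNKNOWN_POS` means the server has forgotten the sliding sync connection and the client must restart without a position token. A sketch of that handling; the unstable endpoint path and request body below are illustrative only:

```python
import requests

# Placeholder homeserver and MSC3575 unstable path.
SYNC_URL = "https://synapse.example.com/_matrix/client/unstable/org.matrix.msc3575/sync"


def sliding_sync_once(access_token: str, pos: "str | None") -> "tuple[dict, str | None]":
    """One sliding-sync request, expiring the connection on M_UNKNOWN_POS.

    A 400 with errcode M_UNKNOWN_POS means the server no longer knows this
    connection, so the caller should retry from scratch with pos=None.
    """
    params = {"pos": pos} if pos else {}
    r = requests.post(
        SYNC_URL,
        params=params,
        json={},  # a real client sends its lists/room subscriptions here
        headers={"Authorization": f"Bearer {access_token}"},
    )
    if r.status_code == 400 and r.json().get("errcode") == "M_UNKNOWN_POS":
        return {}, None  # connection expired: re-sync without a position
    r.raise_for_status()
    body = r.json()
    return body, body.get("pos")
```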

View File

@@ -130,8 +130,7 @@ class Ratelimiter:
Overrides the value set during instantiation if set. Overrides the value set during instantiation if set.
burst_count: How many actions that can be performed before being limited. burst_count: How many actions that can be performed before being limited.
Overrides the value set during instantiation if set. Overrides the value set during instantiation if set.
update: Whether to count this check as performing the action. If the action update: Whether to count this check as performing the action
cannot be performed, the user's action count is not incremented at all.
n_actions: The number of times the user wants to do this action. If the user n_actions: The number of times the user wants to do this action. If the user
cannot do all of the actions, the user's action count is not incremented cannot do all of the actions, the user's action count is not incremented
at all. at all.
@@ -236,8 +235,9 @@ class Ratelimiter:
requester: The requester that is doing the action, if any. requester: The requester that is doing the action, if any.
key: An arbitrary key used to classify an action. Defaults to the key: An arbitrary key used to classify an action. Defaults to the
requester's user ID. requester's user ID.
n_actions: The number of times the user performed the action. May be negative n_actions: The number of times the user wants to do this action. If the user
to "refund" the rate limit. cannot do all of the actions, the user's action count is not incremented
at all.
_time_now_s: The current time. Optional, defaults to the current time according _time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests. to self.clock. Only used by tests.
""" """

View File

@@ -681,17 +681,17 @@ def setup_sentry(hs: "HomeServer") -> None:
) )
# We set some default tags that give some context to this instance # We set some default tags that give some context to this instance
global_scope = sentry_sdk.Scope.get_global_scope() with sentry_sdk.configure_scope() as scope:
global_scope.set_tag("matrix_server_name", hs.config.server.server_name) scope.set_tag("matrix_server_name", hs.config.server.server_name)
app = ( app = (
hs.config.worker.worker_app hs.config.worker.worker_app
if hs.config.worker.worker_app if hs.config.worker.worker_app
else "synapse.app.homeserver" else "synapse.app.homeserver"
) )
name = hs.get_instance_name() name = hs.get_instance_name()
global_scope.set_tag("worker_app", app) scope.set_tag("worker_app", app)
global_scope.set_tag("worker_name", name) scope.set_tag("worker_name", name)
def setup_sdnotify(hs: "HomeServer") -> None: def setup_sdnotify(hs: "HomeServer") -> None:
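
The two columns reflect the sentry-sdk 2.x and 1.x ways of attaching tags to every outgoing event. A small illustration of both, assuming sentry-sdk 2.x is installed for the first form; the DSN is a placeholder:

```python
import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

# sentry-sdk >= 2.x: tag the global scope directly.
global_scope = sentry_sdk.Scope.get_global_scope()
global_scope.set_tag("matrix_server_name", "example.com")
global_scope.set_tag("worker_app", "synapse.app.homeserver")

# sentry-sdk 1.x equivalent (deprecated in 2.x):
with sentry_sdk.configure_scope() as scope:
    scope.set_tag("matrix_server_name", "example.com")
```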

View File

@@ -110,7 +110,7 @@ class AdminCmdStore(
class AdminCmdServer(HomeServer): class AdminCmdServer(HomeServer):
DATASTORE_CLASS = AdminCmdStore DATASTORE_CLASS = AdminCmdStore # type: ignore
async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None: async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:

View File

@@ -74,9 +74,6 @@ from synapse.storage.databases.main.event_push_actions import (
EventPushActionsWorkerStore, EventPushActionsWorkerStore,
) )
from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.experimental_features import (
ExperimentalFeaturesStore,
)
from synapse.storage.databases.main.filtering import FilteringWorkerStore from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore from synapse.storage.databases.main.lock import LockStore
@@ -158,7 +155,6 @@ class GenericWorkerStore(
LockStore, LockStore,
SessionStore, SessionStore,
TaskSchedulerWorkerStore, TaskSchedulerWorkerStore,
ExperimentalFeaturesStore,
): ):
# Properties that multiple storage classes define. Tell mypy what the # Properties that multiple storage classes define. Tell mypy what the
# expected type is. # expected type is.
@@ -167,7 +163,7 @@ class GenericWorkerStore(
class GenericWorkerServer(HomeServer): class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerStore DATASTORE_CLASS = GenericWorkerStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None: def _listen_http(self, listener_config: ListenerConfig) -> None:
assert listener_config.http_options is not None assert listener_config.http_options is not None

View File

@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:
class SynapseHomeServer(HomeServer): class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore DATASTORE_CLASS = DataStore # type: ignore
def _listener_http( def _listener_http(
self, self,
@@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer):
) )
if name in ["media", "federation", "client"]: if name in ["media", "federation", "client"]:
if self.config.media.can_load_media_repo: if self.config.server.enable_media_repo:
media_repo = self.get_media_repository_resource() media_repo = self.get_media_repository_resource()
resources.update( resources.update(
{ {

View File

@@ -140,12 +140,6 @@ class MSC3861:
("experimental", "msc3861", "client_auth_method"), ("experimental", "msc3861", "client_auth_method"),
) )
introspection_endpoint: Optional[str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)),
)
"""The URL of the introspection endpoint used to validate access tokens."""
account_management_url: Optional[str] = attr.ib( account_management_url: Optional[str] = attr.ib(
default=None, default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)), validator=attr.validators.optional(attr.validators.instance_of(str)),
@@ -338,9 +332,6 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data. # MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False) self.msc3391_enabled = experimental.get("msc3391_enabled", False)
# MSC3575 (Sliding Sync API endpoints)
self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)
# MSC3773: Thread notifications # MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False) self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
@@ -399,6 +390,9 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data. # MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False) self.msc3391_enabled = experimental.get("msc3391_enabled", False)
# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)
# MSC3861: Matrix architecture change to delegate authentication via OIDC # MSC3861: Matrix architecture change to delegate authentication via OIDC
try: try:
self.msc3861 = MSC3861(**experimental.get("msc3861", {})) self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -439,12 +433,6 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"), ("experimental", "msc4108_delegation_endpoint"),
) )
self.msc3823_account_suspension = experimental.get( self.msc4115_membership_on_events = experimental.get(
"msc3823_account_suspension", False "msc4115_membership_on_events", False
) )
# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
# MSC4156: Migrate server_name to via
self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)

View File

@@ -218,13 +218,3 @@ class RatelimitConfig(Config):
"rc_media_create", "rc_media_create",
defaults={"per_second": 10, "burst_count": 50}, defaults={"per_second": 10, "burst_count": 50},
) )
self.remote_media_downloads = RatelimitSettings(
key="rc_remote_media_downloads",
per_second=self.parse_size(
config.get("remote_media_download_per_second", "87K")
),
burst_count=self.parse_size(
config.get("remote_media_download_burst_count", "500M")
),
)
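
The removed `rc_remote_media_downloads` settings are parsed as byte sizes rather than request counts, so `87K` is roughly 87 KiB of remote media per second and `500M` a ~500 MiB burst budget. A sketch of a parser following that convention (assuming, as Synapse's `parse_size` does, powers of 1024):

```python
def parse_size(value: "str | int") -> int:
    """Parse a size like '87K' or '500M' into bytes (1K = 1024)."""
    if isinstance(value, int):
        return value
    sizes = {"K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
    suffix = value[-1].upper()
    if suffix in sizes:
        return int(float(value[:-1]) * sizes[suffix])
    return int(value)


assert parse_size("87K") == 89088        # bytes of remote media per second
assert parse_size("500M") == 524288000   # burst budget in bytes
```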

View File

@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the # Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo. # current worker app is the media repo.
if ( if (
config.get("enable_media_repo", True) is False self.root.server.enable_media_repo is False
and config.get("worker_app") != "synapse.app.media_repository" and config.get("worker_app") != "synapse.app.media_repository"
): ):
self.can_load_media_repo = False self.can_load_media_repo = False
@@ -272,10 +272,6 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime remote_media_lifetime
) )
self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store") media_store = os.path.join(data_dir_path, "media_store")

View File

@@ -384,11 +384,6 @@ class ServerConfig(Config):
# Whether to internally track presence, requires that presence is enabled, # Whether to internally track presence, requires that presence is enabled,
self.track_presence = self.presence_enabled and presence_enabled != "untracked" self.track_presence = self.presence_enabled and presence_enabled != "untracked"
# Determines if presence results for offline users are included on initial/full sync
self.presence_include_offline_users_on_sync = presence_config.get(
"include_offline_users_on_sync", False
)
# Custom presence router module # Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section) # This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None self.presence_router_module_class = None
@@ -400,6 +395,12 @@ class ServerConfig(Config):
self.presence_router_config, self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router")) ) = load_module(presence_router_config, ("presence", "presence_router"))
# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
# Whether to require authentication to retrieve profile data (avatars, # Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. # display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get( self.require_auth_for_profile_requests = config.get(

View File

@@ -554,22 +554,3 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
aggregation_key = None aggregation_key = None
return _EventRelation(parent_id, rel_type, aggregation_key) return _EventRelation(parent_id, rel_type, aggregation_key)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedStateEvent:
"""
A stripped down state event. Usually used for remote invite/knocks so the user can
make an informed decision on whether they want to join.
Attributes:
type: Event `type`
state_key: Event `state_key`
sender: Event `sender`
content: Event `content`
"""
type: str
state_key: str
sender: str
content: Dict[str, Any]

View File

@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester from synapse.types import JsonDict, Requester
from . import EventBase, StrippedStateEvent, make_event_from_dict from . import EventBase, make_event_from_dict
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations from synapse.handlers.relations import BundledAggregations
@@ -90,7 +90,6 @@ def prune_event(event: EventBase) -> EventBase:
pruned_event.internal_metadata.stream_ordering = ( pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering event.internal_metadata.stream_ordering
) )
pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
# Mark the event as redacted # Mark the event as redacted
@@ -117,7 +116,6 @@ def clone_event(event: EventBase) -> EventBase:
new_event.internal_metadata.stream_ordering = ( new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering event.internal_metadata.stream_ordering
) )
new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
new_event.internal_metadata.outlier = event.internal_metadata.outlier new_event.internal_metadata.outlier = event.internal_metadata.outlier
return new_event return new_event
@@ -836,48 +834,3 @@ def maybe_upsert_event_field(
del container[key] del container[key]
return upsert_okay return upsert_okay
def strip_event(event: EventBase) -> JsonDict:
"""
Used for "stripped state" events which provide a simplified view of the state of a
room intended to help a potential joiner identify the room (relevant when the user
is invited or knocked).
Stripped state events can only have the `sender`, `type`, `state_key` and `content`
properties present.
"""
return {
"type": event.type,
"state_key": event.state_key,
"content": event.content,
"sender": event.sender,
}
def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]:
"""
Given a raw value from an event's `unsigned` field, attempt to parse it into a
`StrippedStateEvent`.
"""
if isinstance(raw_stripped_event, dict):
# All of these fields are required
type = raw_stripped_event.get("type")
state_key = raw_stripped_event.get("state_key")
sender = raw_stripped_event.get("sender")
content = raw_stripped_event.get("content")
if (
isinstance(type, str)
and isinstance(state_key, str)
and isinstance(sender, str)
and isinstance(content, dict)
):
return StrippedStateEvent(
type=type,
state_key=state_key,
sender=sender,
content=content,
)
return None
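
A quick usage sketch of the `parse_stripped_state_event` helper defined above; the raw dict stands in for a value pulled off the wire (e.g. from `invite_room_state`), and the helper is assumed importable:

```python
# Hypothetical wire-format value, e.g. from `unsigned.invite_room_state`:
raw = {
    "type": "m.room.name",
    "state_key": "",
    "sender": "@alice:example.com",
    "content": {"name": "Synapse Admins"},
}

stripped = parse_stripped_state_event(raw)
assert stripped is not None and stripped.content["name"] == "Synapse Admins"

# Anything malformed (wrong types, missing fields) parses to None:
assert parse_stripped_state_event({"type": "m.room.name"}) is None
assert parse_stripped_state_event("not a dict") is None
```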

View File

@@ -47,9 +47,9 @@ from synapse.events.utils import (
validate_canonicaljson, validate_canonicaljson,
) )
from synapse.http.servlet import validate_json_object from synapse.http.servlet import validate_json_object
from synapse.rest.models import RequestBodyModel
from synapse.storage.controllers.state import server_acl_evaluator_from_event from synapse.storage.controllers.state import server_acl_evaluator_from_event
from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID
from synapse.types.rest import RequestBodyModel
class EventValidator: class EventValidator:

View File

@@ -56,7 +56,6 @@ from synapse.api.errors import (
SynapseError, SynapseError,
UnsupportedRoomVersionError, UnsupportedRoomVersionError,
) )
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import ( from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS, KNOWN_ROOM_VERSIONS,
EventFormatVersions, EventFormatVersions,
@@ -1871,52 +1870,6 @@ class FederationClient(FederationBase):
return filtered_statuses, filtered_failures return filtered_statuses, filtered_failures
async def federation_download_media(
self,
destination: str,
media_id: str,
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Union[
Tuple[int, Dict[bytes, List[bytes]], bytes],
Tuple[int, Dict[bytes, List[bytes]]],
]:
try:
return await self.transport_layer.federation_download_media(
destination,
media_id,
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
# fall back to the _matrix/media/v3/download endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
raise
logger.debug(
"Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
destination,
media_id,
)
return await self.transport_layer.download_media_v3(
destination,
media_id,
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
async def download_media( async def download_media(
self, self,
destination: str, destination: str,
@@ -1924,8 +1877,6 @@ class FederationClient(FederationBase):
output_stream: BinaryIO, output_stream: BinaryIO,
max_size: int, max_size: int,
max_timeout_ms: int, max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]: ) -> Tuple[int, Dict[bytes, List[bytes]]]:
try: try:
return await self.transport_layer.download_media_v3( return await self.transport_layer.download_media_v3(
@@ -1934,8 +1885,6 @@ class FederationClient(FederationBase):
output_stream=output_stream, output_stream=output_stream,
max_size=max_size, max_size=max_size,
max_timeout_ms=max_timeout_ms, max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
) )
except HttpResponseException as e: except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint, # If an error is received that is due to an unrecognised endpoint,
@@ -1956,8 +1905,6 @@ class FederationClient(FederationBase):
output_stream=output_stream, output_stream=output_stream,
max_size=max_size, max_size=max_size,
max_timeout_ms=max_timeout_ms, max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
) )
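
The deleted `federation_download_media` wrapper is an instance of a recurring pattern in this client: try the newer endpoint first, and only fall back when the error says the remote doesn't know the route. A generic sketch of that pattern, with hypothetical names:

```python
import logging

logger = logging.getLogger(__name__)


class HttpResponseException(Exception):
    """Stand-in for Synapse's exception carrying the remote's response."""

    def __init__(self, code: int, msg: str, errcode: str = "") -> None:
        super().__init__(msg)
        self.code = code
        self.errcode = errcode


def is_unknown_endpoint(e: HttpResponseException) -> bool:
    # Roughly: servers that don't know the route answer 404/405,
    # often with errcode M_UNRECOGNIZED.
    return e.code in (404, 405) or e.errcode == "M_UNRECOGNIZED"


async def download_with_fallback(new_route, old_route, *args, **kwargs):
    """Try the MSC3916 federation route, fall back to /_matrix/media/v3."""
    try:
        return await new_route(*args, **kwargs)
    except HttpResponseException as e:
        if not is_unknown_endpoint(e):
            raise  # a legitimate error from a server that knows the route
        logger.debug("new endpoint unsupported, falling back")
        return await old_route(*args, **kwargs)
```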

View File

@@ -674,7 +674,7 @@ class FederationServer(FederationBase):
# This is in addition to the HS-level rate limiting applied by # This is in addition to the HS-level rate limiting applied by
# BaseFederationServlet. # BaseFederationServlet.
# type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?) # type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
requester=None, requester=None,
key=room_id, key=room_id,
update=False, update=False,
@@ -717,7 +717,7 @@ class FederationServer(FederationBase):
SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE, SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
caller_supports_partial_state, caller_supports_partial_state,
) )
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
requester=None, requester=None,
key=room_id, key=room_id,
update=False, update=False,

View File

@@ -21,7 +21,6 @@
# #
import datetime import datetime
import logging import logging
from collections import OrderedDict
from types import TracebackType from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type
@@ -69,10 +68,6 @@ sent_edus_by_type = Counter(
# If the retry interval is larger than this then we enter "catchup" mode # If the retry interval is larger than this then we enter "catchup" mode
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000 CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000
# Limit how many presence states we add to each presence EDU, to ensure that
# they are bounded in size.
MAX_PRESENCE_STATES_PER_EDU = 50
class PerDestinationQueue: class PerDestinationQueue:
""" """
@@ -149,7 +144,7 @@ class PerDestinationQueue:
# Map of user_id -> UserPresenceState of pending presence to be sent to this # Map of user_id -> UserPresenceState of pending presence to be sent to this
# destination # destination
self._pending_presence: OrderedDict[str, UserPresenceState] = OrderedDict() self._pending_presence: Dict[str, UserPresenceState] = {}
# List of room_id -> receipt_type -> user_id -> receipt_dict, # List of room_id -> receipt_type -> user_id -> receipt_dict,
# #
@@ -338,11 +333,12 @@ class PerDestinationQueue:
# not caught up yet # not caught up yet
return return
pending_pdus = []
while True: while True:
self._new_data_to_send = False self._new_data_to_send = False
async with _TransactionQueueManager(self) as ( async with _TransactionQueueManager(self) as (
pending_pdus, # noqa: F811 pending_pdus,
pending_edus, pending_edus,
): ):
if not pending_pdus and not pending_edus: if not pending_pdus and not pending_edus:
@@ -403,7 +399,7 @@ class PerDestinationQueue:
# through another mechanism, because this is all volatile! # through another mechanism, because this is all volatile!
self._pending_edus = [] self._pending_edus = []
self._pending_edus_keyed = {} self._pending_edus_keyed = {}
self._pending_presence.clear() self._pending_presence = {}
self._pending_receipt_edus = [] self._pending_receipt_edus = []
self._start_catching_up() self._start_catching_up()
@@ -725,26 +721,22 @@ class _TransactionQueueManager:
# Add presence EDU. # Add presence EDU.
if self.queue._pending_presence: if self.queue._pending_presence:
# Only send max 50 presence entries in the EDU, to bound the amount
# of data we're sending.
presence_to_add: List[JsonDict] = []
while (
self.queue._pending_presence
and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU
):
_, presence = self.queue._pending_presence.popitem(last=False)
presence_to_add.append(
format_user_presence_state(presence, self.queue._clock.time_msec())
)
pending_edus.append( pending_edus.append(
Edu( Edu(
origin=self.queue._server_name, origin=self.queue._server_name,
destination=self.queue._destination, destination=self.queue._destination,
edu_type=EduTypes.PRESENCE, edu_type=EduTypes.PRESENCE,
content={"push": presence_to_add}, content={
"push": [
format_user_presence_state(
presence, self.queue._clock.time_msec()
)
for presence in self.queue._pending_presence.values()
]
},
) )
) )
self.queue._pending_presence = {}
# Add read receipt EDUs. # Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5)) pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
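
The left-hand column bounds each presence EDU by draining the pending `OrderedDict` in FIFO order, taking at most `MAX_PRESENCE_STATES_PER_EDU` entries per transaction. A self-contained sketch of that batching:

```python
from collections import OrderedDict

MAX_PRESENCE_STATES_PER_EDU = 50


def take_presence_batch(pending: "OrderedDict[str, dict]") -> "list[dict]":
    """Pop at most MAX_PRESENCE_STATES_PER_EDU entries, oldest first.

    Whatever is not taken stays in `pending` for the next transaction,
    which is what keeps each presence EDU bounded in size.
    """
    batch: "list[dict]" = []
    while pending and len(batch) < MAX_PRESENCE_STATES_PER_EDU:
        _user_id, state = pending.popitem(last=False)  # FIFO pop
        batch.append(state)
    return batch


pending = OrderedDict(
    (f"@u{i}:example.com", {"presence": "online"}) for i in range(120)
)
edu_payloads = []
while pending:
    edu_payloads.append({"push": take_presence_batch(pending)})
assert [len(p["push"]) for p in edu_payloads] == [50, 50, 20]
```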

View File

@@ -43,7 +43,6 @@ import ijson
from synapse.api.constants import Direction, Membership from synapse.api.constants import Direction, Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import RoomVersion from synapse.api.room_versions import RoomVersion
from synapse.api.urls import ( from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX, FEDERATION_UNSTABLE_PREFIX,
@@ -820,10 +819,9 @@ class TransportLayerClient:
output_stream: BinaryIO, output_stream: BinaryIO,
max_size: int, max_size: int,
max_timeout_ms: int, max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]: ) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/r0/download/{destination}/{media_id}" path = f"/_matrix/media/r0/download/{destination}/{media_id}"
return await self.client.get_file( return await self.client.get_file(
destination, destination,
path, path,
@@ -836,8 +834,6 @@ class TransportLayerClient:
"allow_remote": "false", "allow_remote": "false",
"timeout_ms": str(max_timeout_ms), "timeout_ms": str(max_timeout_ms),
}, },
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
) )
async def download_media_v3( async def download_media_v3(
@@ -847,10 +843,9 @@ class TransportLayerClient:
output_stream: BinaryIO, output_stream: BinaryIO,
max_size: int, max_size: int,
max_timeout_ms: int, max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]: ) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/v3/download/{destination}/{media_id}" path = f"/_matrix/media/v3/download/{destination}/{media_id}"
return await self.client.get_file( return await self.client.get_file(
destination, destination,
path, path,
@@ -867,31 +862,6 @@ class TransportLayerClient:
"allow_redirect": "true", "allow_redirect": "true",
}, },
follow_redirects=True, follow_redirects=True,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
async def federation_download_media(
self,
destination: str,
media_id: str,
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
path = f"/_matrix/federation/v1/media/download/{media_id}"
return await self.client.federation_get_file(
destination,
path,
output_stream=output_stream,
max_size=max_size,
args={
"timeout_ms": str(max_timeout_ms),
},
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
) )

View File

@@ -32,8 +32,6 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import ( from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES, FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet, FederationAccountStatusServlet,
FederationMediaDownloadServlet,
FederationMediaThumbnailServlet,
FederationUnstableClientKeysClaimServlet, FederationUnstableClientKeysClaimServlet,
) )
from synapse.http.server import HttpServer, JsonResource from synapse.http.server import HttpServer, JsonResource
@@ -317,13 +315,6 @@ def register_servlets(
): ):
continue continue
if (
servletclass == FederationMediaDownloadServlet
or servletclass == FederationMediaThumbnailServlet
):
if not hs.config.media.can_load_media_repo:
continue
servletclass( servletclass(
hs=hs, hs=hs,
authenticator=authenticator, authenticator=authenticator,

View File

@@ -360,33 +360,13 @@ class BaseFederationServlet:
"request" "request"
) )
return None return None
if (
func.__self__.__class__.__name__ # type: ignore
== "FederationMediaDownloadServlet"
or func.__self__.__class__.__name__ # type: ignore
== "FederationMediaThumbnailServlet"
):
response = await func(
origin, content, request, *args, **kwargs
)
else:
response = await func(
origin, content, request.args, *args, **kwargs
)
else:
if (
func.__self__.__class__.__name__ # type: ignore
== "FederationMediaDownloadServlet"
or func.__self__.__class__.__name__ # type: ignore
== "FederationMediaThumbnailServlet"
):
response = await func(
origin, content, request, *args, **kwargs
)
else:
response = await func( response = await func(
origin, content, request.args, *args, **kwargs origin, content, request.args, *args, **kwargs
) )
else:
response = await func(
origin, content, request.args, *args, **kwargs
)
finally: finally:
# if we used the origin's context as the parent, add a new span using # if we used the origin's context as the parent, add a new span using
# the servlet span as a parent, so that we have a link # the servlet span as a parent, so that we have a link

View File

@@ -44,15 +44,10 @@ from synapse.federation.transport.server._base import (
) )
from synapse.http.servlet import ( from synapse.http.servlet import (
parse_boolean_from_args, parse_boolean_from_args,
parse_integer,
parse_integer_from_args, parse_integer_from_args,
parse_string,
parse_string_from_args, parse_string_from_args,
parse_strings_from_args, parse_strings_from_args,
) )
from synapse.http.site import SynapseRequest
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.ratelimitutils import FederationRateLimiter
@@ -792,95 +787,6 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
return 200, {"account_statuses": statuses, "failures": failures} return 200, {"account_statuses": statuses, "failures": failures}
class FederationMediaDownloadServlet(BaseFederationServerServlet):
"""
Implementation of the new federation media `/download` endpoint outlined in MSC3916. Returns
a multipart/mixed response consisting of a JSON object and the requested media
item. This endpoint only returns local media.
"""
PATH = "/media/download/(?P<media_id>[^/]*)"
RATELIMIT = True
def __init__(
self,
hs: "HomeServer",
ratelimiter: FederationRateLimiter,
authenticator: Authenticator,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.media_repo = self.hs.get_media_repository()
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
request: SynapseRequest,
media_id: str,
) -> None:
max_timeout_ms = parse_integer(
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
)
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
await self.media_repo.get_local_media(
request, media_id, None, max_timeout_ms, federation=True
)
class FederationMediaThumbnailServlet(BaseFederationServerServlet):
"""
Implementation of the new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
a multipart/mixed response consisting of a JSON object and the requested media
item. This endpoint only returns local media.
"""
PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
RATELIMIT = True
def __init__(
self,
hs: "HomeServer",
ratelimiter: FederationRateLimiter,
authenticator: Authenticator,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.media_repo = self.hs.get_media_repository()
self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
self.thumbnail_provider = ThumbnailProvider(
hs, self.media_repo, self.media_repo.media_storage
)
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
request: SynapseRequest,
media_id: str,
) -> None:
width = parse_integer(request, "width", required=True)
height = parse_integer(request, "height", required=True)
method = parse_string(request, "method", "scale")
# TODO Parse the Accept header to get a prioritised list of thumbnail types.
m_type = "image/png"
max_timeout_ms = parse_integer(
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
)
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
if self.dynamic_thumbnails:
await self.thumbnail_provider.select_or_generate_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms, True
)
else:
await self.thumbnail_provider.respond_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms, True
)
self.media_repo.mark_recently_accessed(None, media_id)
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet, FederationSendServlet,
FederationEventServlet, FederationEventServlet,
@@ -912,6 +818,4 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationV1SendKnockServlet, FederationV1SendKnockServlet,
FederationMakeKnockServlet, FederationMakeKnockServlet,
FederationAccountStatusServlet, FederationAccountStatusServlet,
FederationMediaDownloadServlet,
FederationMediaThumbnailServlet,
) )

View File

@@ -42,6 +42,7 @@ class AdminHandler:
self._device_handler = hs.get_device_handler() self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers() self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state self._state_storage_controller = self._storage_controllers.state
self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonMapping: async def get_whois(self, user: UserID) -> JsonMapping:
@@ -125,7 +126,13 @@ class AdminHandler:
# Get all rooms the user is in or has been in # Get all rooms the user is in or has been in
rooms = await self._store.get_rooms_for_local_user_where_membership_is( rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id, user_id,
membership_list=Membership.LIST, membership_list=(
Membership.JOIN,
Membership.LEAVE,
Membership.BAN,
Membership.INVITE,
Membership.KNOCK,
),
) )
# We only try and fetch events for rooms the user has been in. If # We only try and fetch events for rooms the user has been in. If
@@ -172,7 +179,7 @@ class AdminHandler:
if room.membership == Membership.JOIN: if room.membership == Membership.JOIN:
stream_ordering = self._store.get_room_max_stream_ordering() stream_ordering = self._store.get_room_max_stream_ordering()
else: else:
stream_ordering = room.event_pos.stream stream_ordering = room.stream_ordering
from_key = RoomStreamToken(topological=0, stream=0) from_key = RoomStreamToken(topological=0, stream=0)
to_key = RoomStreamToken(stream=stream_ordering) to_key = RoomStreamToken(stream=stream_ordering)
@@ -214,6 +221,7 @@ class AdminHandler:
self._storage_controllers, self._storage_controllers,
user_id, user_id,
events, events,
msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
) )
writer.write_events(room_id, events) writer.write_events(room_id, events)

View File

@@ -283,10 +283,6 @@ class DeactivateAccountHandler:
ratelimit=False, ratelimit=False,
require_consent=False, require_consent=False,
) )
# Mark the room forgotten too, because they won't be able to do this
# for us. This may lead to the room being purged eventually.
await self._room_member_handler.forget(user, room_id)
except Exception: except Exception:
logger.exception( logger.exception(
"Failed to part user %r from room %r: ignoring and continuing", "Failed to part user %r from room %r: ignoring and continuing",

View File

@@ -39,7 +39,6 @@ from synapse.metrics.background_process_metrics import (
) )
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.types import ( from synapse.types import (
DeviceListUpdates,
JsonDict, JsonDict,
JsonMapping, JsonMapping,
ScheduledTask, ScheduledTask,
@@ -215,7 +214,7 @@ class DeviceWorkerHandler:
@cancellable @cancellable
async def get_user_ids_changed( async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken self, user_id: str, from_token: StreamToken
) -> DeviceListUpdates: ) -> JsonDict:
"""Get list of users that have had the devices updated, or have newly """Get list of users that have had the devices updated, or have newly
joined a room, that `user_id` may be interested in. joined a room, that `user_id` may be interested in.
""" """
@@ -342,19 +341,11 @@ class DeviceWorkerHandler:
possibly_joined = set() possibly_joined = set()
possibly_left = set() possibly_left = set()
device_list_updates = DeviceListUpdates( result = {"changed": list(possibly_joined), "left": list(possibly_left)}
changed=possibly_joined,
left=possibly_left,
)
log_kv( log_kv(result)
{
"changed": device_list_updates.changed,
"left": device_list_updates.left,
}
)
return device_list_updates return result
async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
if not self.hs.is_mine(UserID.from_string(user_id)): if not self.hs.is_mine(UserID.from_string(user_id)):

View File

@@ -236,13 +236,6 @@ class DeviceMessageHandler:
local_messages = {} local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items(): for user_id, by_device in messages.items():
if not UserID.is_valid(user_id):
logger.warning(
"Ignoring attempt to send device message to invalid user: %r",
user_id,
)
continue
# add an opentracing log entry for each message # add an opentracing log entry for each message
for device_id, message_content in by_device.items(): for device_id, message_content in by_device.items():
log_kv( log_kv(

View File

@@ -35,7 +35,6 @@ from synapse.api.errors import CodeMessageException, Codes, NotFoundError, Synap
from synapse.handlers.device import DeviceHandler from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.types import ( from synapse.types import (
JsonDict, JsonDict,
JsonMapping, JsonMapping,
@@ -46,10 +45,7 @@ from synapse.types import (
from synapse.util import json_decoder from synapse.util import json_decoder
from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.cancellation import cancellable from synapse.util.cancellation import cancellable
from synapse.util.retryutils import ( from synapse.util.retryutils import NotRetryingDestination
NotRetryingDestination,
filter_destinations_by_retry_limiter,
)
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.server import HomeServer from synapse.server import HomeServer
@@ -57,9 +53,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
class E2eKeysHandler: class E2eKeysHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.config = hs.config self.config = hs.config
@@ -69,7 +62,6 @@ class E2eKeysHandler:
self._appservice_handler = hs.get_application_service_handler() self._appservice_handler = hs.get_application_service_handler()
self.is_mine = hs.is_mine self.is_mine = hs.is_mine
self.clock = hs.get_clock() self.clock = hs.get_clock()
self._worker_lock_handler = hs.get_worker_locks_handler()
federation_registry = hs.get_federation_registry() federation_registry = hs.get_federation_registry()
@@ -90,12 +82,6 @@ class E2eKeysHandler:
edu_updater.incoming_signing_key_update, edu_updater.incoming_signing_key_update,
) )
self.device_key_uploader = self.upload_device_keys_for_user
else:
self.device_key_uploader = (
ReplicationUploadKeysForUserRestServlet.make_client(hs)
)
# doesn't really work as part of the generic query API, because the # doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the # query request requires an object POST, but we abuse the
# "query handler" interface. # "query handler" interface.
@@ -159,11 +145,6 @@ class E2eKeysHandler:
remote_queries = {} remote_queries = {}
for user_id, device_ids in device_keys_query.items(): for user_id, device_ids in device_keys_query.items():
if not UserID.is_valid(user_id):
# Ignore invalid user IDs, which is the same behaviour as if
# the user existed but had no keys.
continue
# we use UserID.from_string to catch invalid user ids # we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)): if self.is_mine(UserID.from_string(user_id)):
local_query[user_id] = device_ids local_query[user_id] = device_ids
@@ -278,8 +259,10 @@ class E2eKeysHandler:
"%d destinations to query devices for", len(remote_queries_not_in_cache) "%d destinations to query devices for", len(remote_queries_not_in_cache)
) )
async def _query(destination: str) -> None: async def _query(
queries = remote_queries_not_in_cache[destination] destination_queries: Tuple[str, Dict[str, Iterable[str]]]
) -> None:
destination, queries = destination_queries
return await self._query_devices_for_destination( return await self._query_devices_for_destination(
results, results,
cross_signing_keys, cross_signing_keys,
@@ -289,27 +272,9 @@ class E2eKeysHandler:
timeout, timeout,
) )
# Only try and fetch keys for destinations that are not marked as
# down.
unfiltered_destinations = remote_queries_not_in_cache.keys()
filtered_destinations = set(
await filter_destinations_by_retry_limiter(
unfiltered_destinations,
self.clock,
self.store,
# Let's give an arbitrary grace period for those hosts that are
# only recently down
retry_due_within_ms=60 * 1000,
)
)
failures.update(
(dest, _NOT_READY_FOR_RETRY_FAILURE)
for dest in (unfiltered_destinations - filtered_destinations)
)
await concurrently_execute( await concurrently_execute(
_query, _query,
filtered_destinations, remote_queries_not_in_cache.items(),
10, 10,
delay_cancellation=True, delay_cancellation=True,
) )
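
`concurrently_execute` fans the per-destination queries out with a fixed concurrency cap (10 above). An asyncio analogue using a semaphore; Synapse's helper additionally handles logging contexts and delayed cancellation:

```python
import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    """Run func over args with at most `limit` calls in flight."""
    sem = asyncio.Semaphore(limit)

    async def _run(arg: T) -> None:
        async with sem:
            await func(arg)

    await asyncio.gather(*(_run(a) for a in args))


async def main() -> None:
    async def query(destination: str) -> None:
        await asyncio.sleep(0.01)  # stand-in for a federation /keys/query
        print("queried", destination)

    await concurrently_execute(
        query, [f"server{i}.example" for i in range(30)], limit=10
    )


asyncio.run(main())
```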
@@ -810,17 +775,36 @@ class E2eKeysHandler:
"one_time_keys": A mapping from algorithm to number of keys for that "one_time_keys": A mapping from algorithm to number of keys for that
algorithm, including those previously persisted. algorithm, including those previously persisted.
""" """
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
time_now = self.clock.time_msec() time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys. # TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None) device_keys = keys.get("device_keys", None)
if device_keys: if device_keys:
await self.device_key_uploader( logger.info(
user_id=user_id, "Updating device_keys for device %r for user %s at %d",
device_id=device_id, device_id,
keys={"device_keys": device_keys}, user_id,
time_now,
) )
log_kv(
{
"message": "Updating device_keys for user.",
"user_id": user_id,
"device_id": device_id,
}
)
# TODO: Sign the JSON with the server key
changed = await self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
)
if changed:
# Only notify about device updates *if* the keys actually changed
await self.device_handler.notify_device_update(user_id, [device_id])
else:
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None) one_time_keys = keys.get("one_time_keys", None)
if one_time_keys: if one_time_keys:
log_kv( log_kv(
@@ -856,49 +840,6 @@ class E2eKeysHandler:
{"message": "Did not update fallback_keys", "reason": "no keys given"} {"message": "Did not update fallback_keys", "reason": "no keys given"}
) )
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", str(result))
return {"one_time_key_counts": result}
@tag_args
async def upload_device_keys_for_user(
self, user_id: str, device_id: str, keys: JsonDict
) -> None:
"""
Args:
user_id: user whose keys are being uploaded.
device_id: device whose keys are being uploaded.
keys: a dict containing the `device_keys` of a /keys/upload request.
"""
# This can only be called from the main process.
assert isinstance(self.device_handler, DeviceHandler)
time_now = self.clock.time_msec()
device_keys = keys["device_keys"]
logger.info(
"Updating device_keys for device %r for user %s at %d",
device_id,
user_id,
time_now,
)
log_kv(
{
"message": "Updating device_keys for user.",
"user_id": user_id,
"device_id": device_id,
}
)
# TODO: Sign the JSON with the server key
changed = await self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
)
if changed:
# Only notify about device updates *if* the keys actually changed
await self.device_handler.notify_device_update(user_id, [device_id])
# the device should have been registered already, but it may have been # the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an # deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we # old access_token without an associated device_id. Either way, we
@@ -906,56 +847,53 @@ class E2eKeysHandler:
# keys without a corresponding device. # keys without a corresponding device.
await self.device_handler.check_device_registered(user_id, device_id) await self.device_handler.check_device_registered(user_id, device_id)
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", str(result))
return {"one_time_key_counts": result}
async def _upload_one_time_keys_for_user( async def _upload_one_time_keys_for_user(
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
) -> None: ) -> None:
# We take out a lock so that we don't have to worry about a client logger.info(
# sending duplicate requests. "Adding one_time_keys %r for device %r for user %r at %d",
lock_key = f"{user_id}_{device_id}" one_time_keys.keys(),
async with self._worker_lock_handler.acquire_lock( device_id,
ONE_TIME_KEY_UPLOAD, lock_key user_id,
): time_now,
logger.info( )
"Adding one_time_keys %r for device %r for user %r at %d",
one_time_keys.keys(),
device_id,
user_id,
time_now,
)
# make a list of (alg, id, key) tuples # make a list of (alg, id, key) tuples
key_list = [] key_list = []
for key_id, key_obj in one_time_keys.items(): for key_id, key_obj in one_time_keys.items():
algorithm, key_id = key_id.split(":") algorithm, key_id = key_id.split(":")
key_list.append((algorithm, key_id, key_obj)) key_list.append((algorithm, key_id, key_obj))
# First we check if we have already persisted any of the keys. # First we check if we have already persisted any of the keys.
existing_key_map = await self.store.get_e2e_one_time_keys( existing_key_map = await self.store.get_e2e_one_time_keys(
user_id, device_id, [k_id for _, k_id, _ in key_list] user_id, device_id, [k_id for _, k_id, _ in key_list]
) )
new_keys = [] # Keys that we need to insert. (alg, id, json) tuples. new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
for algorithm, key_id, key in key_list: for algorithm, key_id, key in key_list:
ex_json = existing_key_map.get((algorithm, key_id), None) ex_json = existing_key_map.get((algorithm, key_id), None)
if ex_json: if ex_json:
if not _one_time_keys_match(ex_json, key): if not _one_time_keys_match(ex_json, key):
raise SynapseError( raise SynapseError(
400, 400,
( (
"One time key %s:%s already exists. " "One time key %s:%s already exists. "
"Old key: %s; new key: %r" "Old key: %s; new key: %r"
)
% (algorithm, key_id, ex_json, key),
) )
else: % (algorithm, key_id, ex_json, key),
new_keys.append(
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
) )
else:
new_keys.append(
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys}) log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
await self.store.add_e2e_one_time_keys( await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
user_id, device_id, time_now, new_keys
)
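The newer side of the hunk above takes a named lock per `{user_id}_{device_id}` so duplicate upload requests from a client are serialised instead of racing. A single-process sketch of that idea; Synapse's real lock is cross-worker, via its worker locks handler:

    import asyncio
    from collections import defaultdict
    from typing import DefaultDict, Dict

    _upload_locks: DefaultDict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
    _stored_keys: Dict[str, Dict[str, dict]] = {}

    async def upload_one_time_keys(user_id: str, device_id: str, keys: dict) -> None:
        lock_key = f"{user_id}_{device_id}"  # same key shape as the hunk above
        async with _upload_locks[lock_key]:
            existing = _stored_keys.setdefault(lock_key, {})
            for key_id, key in keys.items():
                # Re-uploading an identical key is a no-op; a conflicting
                # re-upload is an error, mirroring the handler's behaviour.
                if key_id in existing and existing[key_id] != key:
                    raise ValueError(f"One time key {key_id} already exists")
                existing[key_id] = key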
async def upload_signing_keys_for_user( async def upload_signing_keys_for_user(
self, user_id: str, keys: JsonDict self, user_id: str, keys: JsonDict
@@ -1648,9 +1586,6 @@ def _check_device_signature(
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE) raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"}
def _exception_to_failure(e: Exception) -> JsonDict: def _exception_to_failure(e: Exception) -> JsonDict:
if isinstance(e, SynapseError): if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)} return {"status": e.code, "errcode": e.errcode, "message": str(e)}
@@ -1659,7 +1594,7 @@ def _exception_to_failure(e: Exception) -> JsonDict:
return {"status": e.code, "message": str(e)} return {"status": e.code, "message": str(e)}
if isinstance(e, NotRetryingDestination): if isinstance(e, NotRetryingDestination):
return _NOT_READY_FOR_RETRY_FAILURE return {"status": 503, "message": "Not ready for retry"}
# include ConnectionRefused and other errors # include ConnectionRefused and other errors
# #

View File

@@ -34,7 +34,7 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, trace from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict from synapse.types import JsonDict
from synapse.util.async_helpers import ReadWriteLock from synapse.util.async_helpers import Linearizer
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.server import HomeServer from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
# clients belonging to a user will receive and try to upload a new session at # clients belonging to a user will receive and try to upload a new session at
# roughly the same time. Also used to lock out uploads when the key is being # roughly the same time. Also used to lock out uploads when the key is being
# changed. # changed.
self._upload_lock = ReadWriteLock() self._upload_linearizer = Linearizer("upload_room_keys_lock")
@trace @trace
async def get_room_keys( async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:
# we deliberately take the lock to get keys so that changing the version # we deliberately take the lock to get keys so that changing the version
# works atomically # works atomically
async with self._upload_lock.read(user_id): async with self._upload_linearizer.queue(user_id):
# make sure the backup version exists # make sure the backup version exists
try: try:
await self.store.get_e2e_room_keys_version_info(user_id, version) await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
""" """
# lock for consistency with uploading # lock for consistency with uploading
async with self._upload_lock.write(user_id): async with self._upload_linearizer.queue(user_id):
# make sure the backup version exists # make sure the backup version exists
try: try:
version_info = await self.store.get_e2e_room_keys_version_info( version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys. # TODO: Validate the JSON to make sure it has the right keys.
# XXX: perhaps we should use a finer grained lock here? # XXX: perhaps we should use a finer grained lock here?
async with self._upload_lock.write(user_id): async with self._upload_linearizer.queue(user_id):
# Check that the version we're trying to upload is the current version # Check that the version we're trying to upload is the current version
try: try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id) version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -247,12 +247,6 @@ class E2eRoomKeysHandler:
if current_room_key: if current_room_key:
if self._should_replace_room_key(current_room_key, room_key): if self._should_replace_room_key(current_room_key, room_key):
log_kv({"message": "Replacing room key."}) log_kv({"message": "Replacing room key."})
logger.debug(
"Replacing room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
# updates are done one at a time in the DB, so send # updates are done one at a time in the DB, so send
# updates right away rather than batching them up, # updates right away rather than batching them up,
# like we do with the inserts # like we do with the inserts
@@ -262,12 +256,6 @@ class E2eRoomKeysHandler:
changed = True changed = True
else: else:
log_kv({"message": "Not replacing room_key."}) log_kv({"message": "Not replacing room_key."})
logger.debug(
"Not replacing room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
else: else:
log_kv( log_kv(
{ {
@@ -277,12 +265,6 @@ class E2eRoomKeysHandler:
} }
) )
log_kv({"message": "Replacing room key."}) log_kv({"message": "Replacing room key."})
logger.debug(
"Inserting new room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
to_insert.append((room_id, session_id, room_key)) to_insert.append((room_id, session_id, room_key))
changed = True changed = True
@@ -355,7 +337,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys. # TODO: Validate the JSON to make sure it has the right keys.
# lock everyone out until we've switched version # lock everyone out until we've switched version
async with self._upload_lock.write(user_id): async with self._upload_linearizer.queue(user_id):
new_version = await self.store.create_e2e_room_keys_version( new_version = await self.store.create_e2e_room_keys_version(
user_id, version_info user_id, version_info
) )
@@ -382,7 +364,7 @@ class E2eRoomKeysHandler:
} }
""" """
async with self._upload_lock.read(user_id): async with self._upload_linearizer.queue(user_id):
try: try:
res = await self.store.get_e2e_room_keys_version_info(user_id, version) res = await self.store.get_e2e_room_keys_version_info(user_id, version)
except StoreError as e: except StoreError as e:
@@ -407,7 +389,7 @@ class E2eRoomKeysHandler:
NotFoundError: if this backup version doesn't exist NotFoundError: if this backup version doesn't exist
""" """
async with self._upload_lock.write(user_id): async with self._upload_linearizer.queue(user_id):
try: try:
await self.store.delete_e2e_room_keys_version(user_id, version) await self.store.delete_e2e_room_keys_version(user_id, version)
except StoreError as e: except StoreError as e:
@@ -437,7 +419,7 @@ class E2eRoomKeysHandler:
raise SynapseError( raise SynapseError(
400, "Version in body does not match", Codes.INVALID_PARAM 400, "Version in body does not match", Codes.INVALID_PARAM
) )
async with self._upload_lock.write(user_id): async with self._upload_linearizer.queue(user_id):
try: try:
old_info = await self.store.get_e2e_room_keys_version_info( old_info = await self.store.get_e2e_room_keys_version_info(
user_id, version user_id, version
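The swap running through this file replaces a per-user `Linearizer`, where every operation queues behind every other, with a `ReadWriteLock`, where reads of the backup may proceed concurrently while version changes and uploads take the exclusive write side. A toy single-process read/write lock showing those semantics, not Synapse's `ReadWriteLock`:

    import asyncio
    from contextlib import asynccontextmanager
    from typing import AsyncIterator

    class ToyReadWriteLock:
        def __init__(self) -> None:
            self._readers = 0
            self._no_readers = asyncio.Event()
            self._no_readers.set()
            self._write_lock = asyncio.Lock()

        @asynccontextmanager
        async def read(self) -> AsyncIterator[None]:
            async with self._write_lock:  # an active writer blocks new readers
                self._readers += 1
                self._no_readers.clear()
            try:
                yield
            finally:
                self._readers -= 1
                if self._readers == 0:
                    self._no_readers.set()

        @asynccontextmanager
        async def write(self) -> AsyncIterator[None]:
            async with self._write_lock:  # exclude writers and new readers
                await self._no_readers.wait()  # drain in-flight readers
                yield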

View File

@@ -148,6 +148,7 @@ class EventHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers() self._storage_controllers = hs.get_storage_controllers()
self._config = hs.config
async def get_event( async def get_event(
self, self,
@@ -193,6 +194,7 @@ class EventHandler:
user.to_string(), user.to_string(),
[event], [event],
is_peeking=is_peeking, is_peeking=is_peeking,
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
) )
if not filtered: if not filtered:

View File

@@ -199,7 +199,7 @@ class InitialSyncHandler:
) )
elif event.membership == Membership.LEAVE: elif event.membership == Membership.LEAVE:
room_end_token = RoomStreamToken( room_end_token = RoomStreamToken(
stream=event.event_pos.stream, stream=event.stream_ordering,
) )
deferred_room_state = run_in_background( deferred_room_state = run_in_background(
self._state_storage_controller.get_state_for_events, self._state_storage_controller.get_state_for_events,
@@ -224,6 +224,7 @@ class InitialSyncHandler:
self._storage_controllers, self._storage_controllers,
user_id, user_id,
messages, messages,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -382,6 +383,7 @@ class InitialSyncHandler:
requester.user.to_string(), requester.user.to_string(),
messages, messages,
is_peeking=is_peeking, is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token) start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -496,6 +498,7 @@ class InitialSyncHandler:
requester.user.to_string(), requester.user.to_string(),
messages, messages,
is_peeking=is_peeking, is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
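The same change recurs in the pagination, relations, search and sync handlers below: the older side threads `hs.config.experimental.msc4115_membership_on_events` from homeserver config into every `filter_events_for_client` call site. A hypothetical miniature of that pattern, reading the flag once from config and passing it explicitly as a keyword:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class ExperimentalConfig:
        msc4115_membership_on_events: bool = False

    def filter_events_for_client(
        events: List[str], *, msc4115_membership_on_events: bool
    ) -> List[str]:
        # The real helper decides per-event visibility; this stub only shows
        # the keyword-only flag being honoured at the call site.
        return list(events)

    config = ExperimentalConfig(msc4115_membership_on_events=True)
    filtered = filter_events_for_client(
        ["$event1", "$event2"],
        msc4115_membership_on_events=config.msc4115_membership_on_events,
    )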

View File

@@ -201,7 +201,7 @@ class MessageHandler:
if at_token: if at_token:
last_event_id = ( last_event_id = (
await self.store.get_last_event_id_in_room_before_stream_ordering( await self.store.get_last_event_in_room_before_stream_ordering(
room_id, room_id,
end_token=at_token.room_key, end_token=at_token.room_key,
) )
@@ -496,6 +496,13 @@ class EventCreationHandler:
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
self.membership_types_to_include_profile_data_in = {
Membership.JOIN,
Membership.KNOCK,
}
if self.hs.config.server.include_profile_data_on_invite:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs) self.send_event = ReplicationSendEventRestServlet.make_client(hs)
self.send_events = ReplicationSendEventsRestServlet.make_client(hs) self.send_events = ReplicationSendEventsRestServlet.make_client(hs)
@@ -587,6 +594,8 @@ class EventCreationHandler:
Creates an FrozenEvent object, filling out auth_events, prev_events, Creates an FrozenEvent object, filling out auth_events, prev_events,
etc. etc.
Adds display names to Join membership events.
Args: Args:
requester requester
event_dict: An entire event event_dict: An entire event
@@ -642,17 +651,6 @@ class EventCreationHandler:
""" """
await self.auth_blocking.check_auth_blocking(requester=requester) await self.auth_blocking.check_auth_blocking(requester=requester)
if event_dict["type"] == EventTypes.Message:
requester_suspended = await self.store.get_user_suspended_status(
requester.user.to_string()
)
if requester_suspended:
raise SynapseError(
403,
"Sending messages while account is suspended is not allowed.",
Codes.USER_ACCOUNT_SUSPENDED,
)
if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"] room_version_id = event_dict["content"]["room_version"]
maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
@@ -674,6 +672,29 @@ class EventCreationHandler:
self.validator.validate_builder(builder) self.validator.validate_builder(builder)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)
if membership in self.membership_types_to_include_profile_data_in:
# If event doesn't include a display name, add one.
profile = self.profile_handler
content = builder.content
try:
if "displayname" not in content:
displayname = await profile.get_displayname(target)
if displayname is not None:
content["displayname"] = displayname
if "avatar_url" not in content:
avatar_url = await profile.get_avatar_url(target)
if avatar_url is not None:
content["avatar_url"] = avatar_url
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s", target, e
)
is_exempt = await self._is_exempt_from_privacy_policy(builder, requester) is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
if require_consent and not is_exempt: if require_consent and not is_exempt:
await self.assert_accepted_privacy_policy(requester) await self.assert_accepted_privacy_policy(requester)
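In the hunk above, the older side moves profile back-fill into `create_event`: if a membership event omits `displayname` or `avatar_url`, the target's global profile supplies them, and a profile lookup failure is logged rather than fatal. A minimal sketch of that best-effort back-fill, with hypothetical helper callables standing in for the profile handler:

    import logging
    from typing import Awaitable, Callable, Optional

    logger = logging.getLogger(__name__)

    async def backfill_profile(
        content: dict,
        get_displayname: Callable[[], Awaitable[Optional[str]]],
        get_avatar_url: Callable[[], Awaitable[Optional[str]]],
    ) -> None:
        try:
            if "displayname" not in content:
                displayname = await get_displayname()
                if displayname is not None:
                    content["displayname"] = displayname
            if "avatar_url" not in content:
                avatar_url = await get_avatar_url()
                if avatar_url is not None:
                    content["avatar_url"] = avatar_url
        except Exception as e:
            # Best effort: the membership event is still created.
            logger.info("Failed to get profile information: %s", e)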
@@ -1562,7 +1583,6 @@ class EventCreationHandler:
# stream_ordering entry manually (as it was persisted on # stream_ordering entry manually (as it was persisted on
# another worker). # another worker).
event.internal_metadata.stream_ordering = stream_id event.internal_metadata.stream_ordering = stream_id
event.internal_metadata.instance_name = writer_instance
return event return event

View File

@@ -27,6 +27,7 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
@@ -40,7 +41,6 @@ from synapse.types import (
StreamKeyType, StreamKeyType,
TaskStatus, TaskStatus,
) )
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock from synapse.util.async_helpers import ReadWriteLock
from synapse.visibility import filter_events_for_client from synapse.visibility import filter_events_for_client
@@ -623,6 +623,7 @@ class PaginationHandler:
user_id, user_id,
events, events,
is_peeking=(member_event_id is None), is_peeking=(member_event_id is None),
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
# if after the filter applied there are no more events # if after the filter applied there are no more events

View File

@@ -286,14 +286,8 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
room_ids: Iterable[str], room_ids: Iterable[str],
is_guest: bool, is_guest: bool,
explicit_room_id: Optional[str] = None, explicit_room_id: Optional[str] = None,
to_key: Optional[MultiWriterStreamToken] = None,
) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
""" to_key = self.get_current_key()
Find read receipts for given rooms (> `from_token` and <= `to_token`)
"""
if to_key is None:
to_key = self.get_current_key()
if from_key == to_key: if from_key == to_key:
return [], to_key return [], to_key
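The newer side of the hunk above lets callers pin an explicit upper bound via `to_key`, falling back to the source's current position when none is given. A self-contained miniature of that pattern:

    from typing import List, Optional, Tuple

    class ToyReceiptSource:
        def __init__(self) -> None:
            self._receipts: List[Tuple[int, str]] = []  # (stream pos, receipt)
            self._current_key = 0

        def get_current_key(self) -> int:
            return self._current_key

        def get_new_events(
            self, from_key: int, to_key: Optional[int] = None
        ) -> Tuple[List[str], int]:
            # Find receipts with from_key < pos <= to_key.
            if to_key is None:
                to_key = self.get_current_key()
            if from_key == to_key:
                return [], to_key
            return [r for pos, r in self._receipts if from_key < pos <= to_key], to_key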

View File

@@ -590,7 +590,7 @@ class RegistrationHandler:
# moving away from bare excepts is a good thing to do. # moving away from bare excepts is a good thing to do.
logger.error("Failed to join new user to %r: %r", r, e) logger.error("Failed to join new user to %r: %r", r, e)
except Exception as e: except Exception as e:
logger.error("Failed to join new user to %r: %r", r, e, exc_info=True) logger.error("Failed to join new user to %r: %r", r, e)
async def _auto_join_rooms(self, user_id: str) -> None: async def _auto_join_rooms(self, user_id: str) -> None:
"""Automatically joins users to auto join rooms - creating the room in the first place """Automatically joins users to auto join rooms - creating the room in the first place

View File

@@ -95,6 +95,7 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler() self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer() self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler() self._event_creation_handler = hs.get_event_creation_handler()
self._config = hs.config
async def get_relations( async def get_relations(
self, self,
@@ -163,6 +164,7 @@ class RelationsHandler:
user_id, user_id,
events, events,
is_peeking=(member_event_id is None), is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
) )
# The relations returned for the requested event do include their # The relations returned for the requested event do include their
@@ -391,9 +393,9 @@ class RelationsHandler:
# Attempt to find another event to use as the latest event. # Attempt to find another event to use as the latest event.
potential_events, _ = await self._main_store.get_relations_for_event( potential_events, _ = await self._main_store.get_relations_for_event(
room_id,
event_id, event_id,
event, event,
room_id,
RelationTypes.THREAD, RelationTypes.THREAD,
direction=Direction.FORWARDS, direction=Direction.FORWARDS,
) )
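The hunk above reorders the positional arguments to `get_relations_for_event` between the two commits. Because `room_id` and `event_id` are both strings, a reorder like this passes type checks silently, which is why keyword-only signatures are a common defence; a hypothetical sketch:

    from typing import List

    async def get_relations_for_event(
        *, room_id: str, event_id: str, relation_type: str
    ) -> List[str]:
        # Keyword-only parameters make an accidental argument swap a TypeError
        # at the call site instead of a silent lookup with the wrong room_id.
        return []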
@@ -608,6 +610,7 @@ class RelationsHandler:
user_id, user_id,
events, events,
is_peeking=(member_event_id is None), is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
) )
aggregations = await self.get_bundled_aggregations( aggregations = await self.get_bundled_aggregations(

View File

@@ -40,6 +40,7 @@ from typing import (
) )
import attr import attr
from typing_extensions import TypedDict
import synapse.events.snapshot import synapse.events.snapshot
from synapse.api.constants import ( from synapse.api.constants import (
@@ -87,7 +88,6 @@ from synapse.types import (
UserID, UserID,
create_requester, create_requester,
) )
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter from synapse.types.state import StateFilter
from synapse.util import stringutils from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache from synapse.util.caches.response_cache import ResponseCache
@@ -1188,8 +1188,6 @@ class RoomCreationHandler:
) )
events_to_send.append((power_event, power_context)) events_to_send.append((power_event, power_context))
else: else:
# Please update the docs for `default_power_level_content_override` when
# updating the `events` dict below
power_level_content: JsonDict = { power_level_content: JsonDict = {
"users": {creator_id: 100}, "users": {creator_id: 100},
"users_default": 0, "users_default": 0,
@@ -1478,6 +1476,7 @@ class RoomContextHandler:
user.to_string(), user.to_string(),
events, events,
is_peeking=is_peeking, is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
event = await self.store.get_event( event = await self.store.get_event(
@@ -1781,6 +1780,63 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
return self.store.get_current_room_stream_token_for_room_id(room_id) return self.store.get_current_room_stream_token_for_room_id(room_id)
class ShutdownRoomParams(TypedDict):
"""
Attributes:
requester_user_id:
User who requested the action. Will be recorded as putting the room on the
blocking list.
new_room_user_id:
If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be
moved into that room. If not set, no new room will be created
and the users will just be removed from the old room.
new_room_name:
A string representing the name of the room that new users will
be invited to. Defaults to `Content Violation Notification`
message:
A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly
convey why the original room was shut down.
Defaults to `Sharing illegal content on this server is not
permitted and rooms in violation will be blocked.`
block:
If set to `true`, this room will be added to a blocking list,
preventing future attempts to join the room. Defaults to `false`.
purge:
If set to `true`, purge the given room from the database.
force_purge:
If set to `true`, the room will be purged from database
even if there are still users joined to the room.
"""
requester_user_id: Optional[str]
new_room_user_id: Optional[str]
new_room_name: Optional[str]
message: Optional[str]
block: bool
purge: bool
force_purge: bool
class ShutdownRoomResponse(TypedDict):
"""
Attributes:
kicked_users: An array of users (`user_id`) that were kicked.
failed_to_kick_users:
An array of users (`user_id`) that were not kicked.
local_aliases:
An array of strings representing the local aliases that were
migrated from the old room to the new.
new_room_id: A string representing the room ID of the new room.
"""
kicked_users: List[str]
failed_to_kick_users: List[str]
local_aliases: List[str]
new_room_id: Optional[str]
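For illustration, constructing the `ShutdownRoomParams` defined above, assuming that definition is in scope; every key is required on a total TypedDict, even when its value is None:

    params: ShutdownRoomParams = {
        "requester_user_id": "@admin:example.org",
        "new_room_user_id": None,
        "new_room_name": None,
        "message": None,
        "block": True,
        "purge": True,
        "force_purge": False,
    }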
class RoomShutdownHandler: class RoomShutdownHandler:
DEFAULT_MESSAGE = ( DEFAULT_MESSAGE = (
"Sharing illegal content on this server is not permitted and rooms in" "Sharing illegal content on this server is not permitted and rooms in"

View File

@@ -106,13 +106,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_auth_handler = hs.get_event_auth_handler() self.event_auth_handler = hs.get_event_auth_handler()
self._worker_lock_handler = hs.get_worker_locks_handler() self._worker_lock_handler = hs.get_worker_locks_handler()
self._membership_types_to_include_profile_data_in = {
Membership.JOIN,
Membership.KNOCK,
}
if self.hs.config.server.include_profile_data_on_invite:
self._membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.member_linearizer: Linearizer = Linearizer(name="member") self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter") self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
@@ -792,8 +785,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if ( if (
not self.allow_per_room_profiles and not is_requester_server_notices_user not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned: ) or requester.shadow_banned:
# Strip profile data, knowing that new profile data will be added to # Strip profile data, knowing that new profile data will be added to the
# the event's content below using the target's global profile. # event's content in event_creation_handler.create_event() using the target's
# global profile.
content.pop("displayname", None) content.pop("displayname", None)
content.pop("avatar_url", None) content.pop("avatar_url", None)
@@ -829,29 +823,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if action in ["kick", "unban"]: if action in ["kick", "unban"]:
effective_membership_state = "leave" effective_membership_state = "leave"
if effective_membership_state not in Membership.LIST:
raise SynapseError(400, "Invalid membership key")
# Add profile data for joins etc, if no per-room profile.
if (
effective_membership_state
in self._membership_types_to_include_profile_data_in
):
# If event doesn't include a display name, add one.
profile = self.profile_handler
try:
if "displayname" not in content:
displayname = await profile.get_displayname(target)
if displayname is not None:
content["displayname"] = displayname
if "avatar_url" not in content:
avatar_url = await profile.get_avatar_url(target)
if avatar_url is not None:
content["avatar_url"] = avatar_url
except Exception as e:
logger.info("Failed to get profile information for %r: %s", target, e)
# if this is a join with a 3pid signature, we may need to turn a 3pid # if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join. # invite into a normal invite before we can handle the join.
if third_party_signed is not None: if third_party_signed is not None:

View File

@@ -483,6 +483,7 @@ class SearchHandler:
self._storage_controllers, self._storage_controllers,
user.to_string(), user.to_string(),
filtered_events, filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
events.sort(key=lambda e: -rank_map[e.event_id]) events.sort(key=lambda e: -rank_map[e.event_id])
@@ -584,6 +585,7 @@ class SearchHandler:
self._storage_controllers, self._storage_controllers,
user.to_string(), user.to_string(),
filtered_events, filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
room_events.extend(events) room_events.extend(events)
@@ -671,12 +673,14 @@ class SearchHandler:
self._storage_controllers, self._storage_controllers,
user.to_string(), user.to_string(),
res.events_before, res.events_before,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
events_after = await filter_events_for_client( events_after = await filter_events_for_client(
self._storage_controllers, self._storage_controllers,
user.to_string(), user.to_string(),
res.events_after, res.events_after,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
) )
context: JsonDict = { context: JsonDict = {

File diff suppressed because it is too large

View File

@@ -293,9 +293,7 @@ class StatsHandler:
"history_visibility" "history_visibility"
) )
elif delta.event_type == EventTypes.RoomEncryption: elif delta.event_type == EventTypes.RoomEncryption:
room_state["encryption"] = event_content.get( room_state["encryption"] = event_content.get("algorithm")
EventContentFields.ENCRYPTION_ALGORITHM
)
elif delta.event_type == EventTypes.Name: elif delta.event_type == EventTypes.Name:
room_state["name"] = event_content.get("name") room_state["name"] = event_content.get("name")
elif delta.event_type == EventTypes.Topic: elif delta.event_type == EventTypes.Topic:
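The newer side reads the encryption algorithm through a named constant instead of the bare string "algorithm". A miniature of why that indirection helps:

    class EventContentFields:
        # One authoritative spelling of the content key, instead of string
        # literals scattered across handlers.
        ENCRYPTION_ALGORITHM = "algorithm"

    event_content = {"algorithm": "m.megolm.v1.aes-sha2"}
    assert event_content.get(EventContentFields.ENCRYPTION_ALGORITHM) == "m.megolm.v1.aes-sha2"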

View File

@@ -28,14 +28,11 @@ from typing import (
Dict, Dict,
FrozenSet, FrozenSet,
List, List,
Literal,
Mapping, Mapping,
Optional, Optional,
Sequence, Sequence,
Set, Set,
Tuple, Tuple,
Union,
overload,
) )
import attr import attr
@@ -131,8 +128,6 @@ class SyncVersion(Enum):
# Traditional `/sync` endpoint # Traditional `/sync` endpoint
SYNC_V2 = "sync_v2" SYNC_V2 = "sync_v2"
# Part of MSC3575 Sliding Sync
E2EE_SYNC = "e2ee_sync"
@attr.s(slots=True, frozen=True, auto_attribs=True) @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -284,47 +279,6 @@ class SyncResult:
or self.device_lists or self.device_lists
) )
@staticmethod
def empty(
next_batch: StreamToken,
device_one_time_keys_count: JsonMapping,
device_unused_fallback_key_types: List[str],
) -> "SyncResult":
"Return a new empty result"
return SyncResult(
next_batch=next_batch,
presence=[],
account_data=[],
joined=[],
invited=[],
knocked=[],
archived=[],
to_device=[],
device_lists=DeviceListUpdates(),
device_one_time_keys_count=device_one_time_keys_count,
device_unused_fallback_key_types=device_unused_fallback_key_types,
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeSyncResult:
"""
Attributes:
next_batch: Token for the next sync
to_device: List of direct messages for the device.
device_lists: List of user_ids whose devices have changed
device_one_time_keys_count: Dict of algorithm to count for one time keys
for this device
device_unused_fallback_key_types: List of key types that have an unused fallback
key
"""
next_batch: StreamToken
to_device: List[JsonDict]
device_lists: DeviceListUpdates
device_one_time_keys_count: JsonMapping
device_unused_fallback_key_types: List[str]
class SyncHandler: class SyncHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
@@ -368,31 +322,6 @@ class SyncHandler:
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
@overload
async def wait_for_sync_for_user(
self,
requester: Requester,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.SYNC_V2],
request_key: SyncRequestKey,
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
) -> SyncResult: ...
@overload
async def wait_for_sync_for_user(
self,
requester: Requester,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.E2EE_SYNC],
request_key: SyncRequestKey,
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
) -> E2eeSyncResult: ...
@overload
async def wait_for_sync_for_user( async def wait_for_sync_for_user(
self, self,
requester: Requester, requester: Requester,
@@ -402,18 +331,7 @@ class SyncHandler:
since_token: Optional[StreamToken] = None, since_token: Optional[StreamToken] = None,
timeout: int = 0, timeout: int = 0,
full_state: bool = False, full_state: bool = False,
) -> Union[SyncResult, E2eeSyncResult]: ... ) -> SyncResult:
async def wait_for_sync_for_user(
self,
requester: Requester,
sync_config: SyncConfig,
sync_version: SyncVersion,
request_key: SyncRequestKey,
since_token: Optional[StreamToken] = None,
timeout: int = 0,
full_state: bool = False,
) -> Union[SyncResult, E2eeSyncResult]:
"""Get the sync for a client if we have new data for it now. Otherwise """Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result. return an empty sync result.
@@ -426,10 +344,8 @@ class SyncHandler:
since_token: The point in the stream to sync from. since_token: The point in the stream to sync from.
timeout: How long to wait for new data to arrive before giving up. timeout: How long to wait for new data to arrive before giving up.
full_state: Whether to return the full state for each room. full_state: Whether to return the full state for each room.
Returns: Returns:
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`. When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
""" """
# If the user is not part of the mau group, then check that limits have # If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain # not been exceeded (if not part of the group by this point, almost certain
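The newer side's `@overload` stack above uses `Literal` members of the enum so that the type checker gives `wait_for_sync_for_user` a precise return type per sync version, while one runtime implementation serves both. A self-contained miniature of the pattern:

    from enum import Enum
    from typing import Literal, Union, overload

    class SyncVersion(Enum):
        SYNC_V2 = "sync_v2"
        E2EE_SYNC = "e2ee_sync"

    class SyncResult: ...
    class E2eeSyncResult: ...

    @overload
    def sync(version: Literal[SyncVersion.SYNC_V2]) -> SyncResult: ...
    @overload
    def sync(version: Literal[SyncVersion.E2EE_SYNC]) -> E2eeSyncResult: ...
    def sync(version: SyncVersion) -> Union[SyncResult, E2eeSyncResult]:
        # One runtime body; the overloads above only narrow the static type.
        return SyncResult() if version is SyncVersion.SYNC_V2 else E2eeSyncResult()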
@@ -450,29 +366,6 @@ class SyncHandler:
logger.debug("Returning sync response for %s", user_id) logger.debug("Returning sync response for %s", user_id)
return res return res
@overload
async def _wait_for_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.SYNC_V2],
since_token: Optional[StreamToken],
timeout: int,
full_state: bool,
cache_context: ResponseCacheContext[SyncRequestKey],
) -> SyncResult: ...
@overload
async def _wait_for_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.E2EE_SYNC],
since_token: Optional[StreamToken],
timeout: int,
full_state: bool,
cache_context: ResponseCacheContext[SyncRequestKey],
) -> E2eeSyncResult: ...
@overload
async def _wait_for_sync_for_user( async def _wait_for_sync_for_user(
self, self,
sync_config: SyncConfig, sync_config: SyncConfig,
@@ -481,17 +374,7 @@ class SyncHandler:
timeout: int, timeout: int,
full_state: bool, full_state: bool,
cache_context: ResponseCacheContext[SyncRequestKey], cache_context: ResponseCacheContext[SyncRequestKey],
) -> Union[SyncResult, E2eeSyncResult]: ... ) -> SyncResult:
async def _wait_for_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: SyncVersion,
since_token: Optional[StreamToken],
timeout: int,
full_state: bool,
cache_context: ResponseCacheContext[SyncRequestKey],
) -> Union[SyncResult, E2eeSyncResult]:
"""The start of the machinery that produces a /sync response. """The start of the machinery that produces a /sync response.
See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details. See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
@@ -518,45 +401,6 @@ class SyncHandler:
if context: if context:
context.tag = sync_label context.tag = sync_label
if since_token is not None:
# We need to make sure this worker has caught up with the token. If
# this returns false it means we timed out waiting, and we should
# just return an empty response.
start = self.clock.time_msec()
if not await self.notifier.wait_for_stream_token(since_token):
logger.warning(
"Timed out waiting for worker to catch up. Returning empty response"
)
device_id = sync_config.device_id
one_time_keys_count: JsonMapping = {}
unused_fallback_key_types: List[str] = []
if device_id:
user_id = sync_config.user.to_string()
# TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
unused_fallback_key_types = list(
await self.store.get_e2e_unused_fallback_key_types(
user_id, device_id
)
)
cache_context.should_cache = False # Don't cache empty responses
return SyncResult.empty(
since_token, one_time_keys_count, unused_fallback_key_types
)
# If we've spent significant time waiting to catch up, take it off
# the timeout.
now = self.clock.time_msec()
if now - start > 1_000:
timeout -= now - start
timeout = max(timeout, 0)
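The newer side's catch-up logic above deducts any significant time spent waiting for the worker from the client's /sync timeout, so the overall request does not overrun. The arithmetic in isolation:

    def remaining_timeout(timeout_ms: int, start_ms: int, now_ms: int) -> int:
        # If we spent more than a second catching up, take it off the timeout,
        # clamping at zero so we never wait a negative duration.
        waited_ms = now_ms - start_ms
        if waited_ms > 1_000:
            timeout_ms = max(timeout_ms - waited_ms, 0)
        return timeout_ms

    assert remaining_timeout(30_000, 0, 5_000) == 25_000
    assert remaining_timeout(30_000, 0, 500) == 30_000  # small waits are ignored
    assert remaining_timeout(2_000, 0, 5_000) == 0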
# if we have a since token, delete any to-device messages before that token # if we have a since token, delete any to-device messages before that token
# (since we now know that the device has received them) # (since we now know that the device has received them)
if since_token is not None: if since_token is not None:
@@ -573,16 +417,14 @@ class SyncHandler:
if timeout == 0 or since_token is None or full_state: if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling # we are going to return immediately, so don't bother calling
# notifier.wait_for_events. # notifier.wait_for_events.
result: Union[SyncResult, E2eeSyncResult] = ( result: SyncResult = await self.current_sync_for_user(
await self.current_sync_for_user( sync_config, sync_version, since_token, full_state=full_state
sync_config, sync_version, since_token, full_state=full_state
)
) )
else: else:
# Otherwise, we wait for something to happen and report it to the user. # Otherwise, we wait for something to happen and report it to the user.
async def current_sync_callback( async def current_sync_callback(
before_token: StreamToken, after_token: StreamToken before_token: StreamToken, after_token: StreamToken
) -> Union[SyncResult, E2eeSyncResult]: ) -> SyncResult:
return await self.current_sync_for_user( return await self.current_sync_for_user(
sync_config, sync_version, since_token sync_config, sync_version, since_token
) )
@@ -614,43 +456,14 @@ class SyncHandler:
return result return result
@overload
async def current_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.SYNC_V2],
since_token: Optional[StreamToken] = None,
full_state: bool = False,
) -> SyncResult: ...
@overload
async def current_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: Literal[SyncVersion.E2EE_SYNC],
since_token: Optional[StreamToken] = None,
full_state: bool = False,
) -> E2eeSyncResult: ...
@overload
async def current_sync_for_user( async def current_sync_for_user(
self, self,
sync_config: SyncConfig, sync_config: SyncConfig,
sync_version: SyncVersion, sync_version: SyncVersion,
since_token: Optional[StreamToken] = None, since_token: Optional[StreamToken] = None,
full_state: bool = False, full_state: bool = False,
) -> Union[SyncResult, E2eeSyncResult]: ... ) -> SyncResult:
"""Generates the response body of a sync result, represented as a SyncResult.
async def current_sync_for_user(
self,
sync_config: SyncConfig,
sync_version: SyncVersion,
since_token: Optional[StreamToken] = None,
full_state: bool = False,
) -> Union[SyncResult, E2eeSyncResult]:
"""
Generates the response body of a sync result, represented as a
`SyncResult`/`E2eeSyncResult`.
This is a wrapper around `generate_sync_result` which starts an open tracing This is a wrapper around `generate_sync_result` which starts an open tracing
span to track the sync. See `generate_sync_result` for the next part of your span to track the sync. See `generate_sync_result` for the next part of your
@@ -661,25 +474,15 @@ class SyncHandler:
sync_version: Determines what kind of sync response to generate. sync_version: Determines what kind of sync response to generate.
since_token: The point in the stream to sync from. since_token: The point in the stream to sync from.
full_state: Whether to return the full state for each room. full_state: Whether to return the full state for each room.
Returns: Returns:
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`. When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
""" """
with start_active_span("sync.current_sync_for_user"): with start_active_span("sync.current_sync_for_user"):
log_kv({"since_token": since_token}) log_kv({"since_token": since_token})
# Go through the `/sync` v2 path # Go through the `/sync` v2 path
if sync_version == SyncVersion.SYNC_V2: if sync_version == SyncVersion.SYNC_V2:
sync_result: Union[SyncResult, E2eeSyncResult] = ( sync_result: SyncResult = await self.generate_sync_result(
await self.generate_sync_result( sync_config, since_token, full_state
sync_config, since_token, full_state
)
)
# Go through the MSC3575 Sliding Sync `/sync/e2ee` path
elif sync_version == SyncVersion.E2EE_SYNC:
sync_result = await self.generate_e2ee_sync_result(
sync_config, since_token
) )
else: else:
raise Exception( raise Exception(
@@ -844,6 +647,7 @@ class SyncHandler:
sync_config.user.to_string(), sync_config.user.to_string(),
recents, recents,
always_include_ids=current_state_ids, always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
) )
log_kv({"recents_after_visibility_filtering": len(recents)}) log_kv({"recents_after_visibility_filtering": len(recents)})
else: else:
@@ -929,6 +733,7 @@ class SyncHandler:
sync_config.user.to_string(), sync_config.user.to_string(),
loaded_recents, loaded_recents,
always_include_ids=current_state_ids, always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
) )
loaded_recents = [] loaded_recents = []
@@ -979,6 +784,89 @@ class SyncHandler:
bundled_aggregations=bundled_aggregations, bundled_aggregations=bundled_aggregations,
) )
async def get_state_after_event(
self,
event_id: str,
state_filter: Optional[StateFilter] = None,
await_full_state: bool = True,
) -> StateMap[str]:
"""
Get the room state after the given event
Args:
event_id: event of interest
state_filter: The state filter used to fetch state from the database.
await_full_state: if `True`, will block if we do not yet have complete state
at the event and `state_filter` is not satisfied by partial state.
Defaults to `True`.
"""
state_ids = await self._state_storage_controller.get_state_ids_for_event(
event_id,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)
# using get_metadata_for_events here (instead of get_event) sidesteps an issue
# with redactions: if `event_id` is a redaction event, and we don't have the
# original (possibly because it got purged), get_event will refuse to return
# the redaction event, which isn't terribly helpful here.
#
# (To be fair, in that case we could assume it's *not* a state event, and
# therefore we don't need to worry about it. But still, it seems cleaner just
# to pull the metadata.)
m = (await self.store.get_metadata_for_events([event_id]))[event_id]
if m.state_key is not None and m.rejection_reason is None:
state_ids = dict(state_ids)
state_ids[(m.event_type, m.state_key)] = event_id
return state_ids
async def get_state_at(
self,
room_id: str,
stream_position: StreamToken,
state_filter: Optional[StateFilter] = None,
await_full_state: bool = True,
) -> StateMap[str]:
"""Get the room state at a particular stream position
Args:
room_id: room for which to get state
stream_position: point at which to get state
state_filter: The state filter used to fetch state from the database.
await_full_state: if `True`, will block if we do not yet have complete state
at the last event in the room before `stream_position` and
`state_filter` is not satisfied by partial state. Defaults to `True`.
"""
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=stream_position.room_key,
)
if last_event_id:
state = await self.get_state_after_event(
last_event_id,
state_filter=state_filter or StateFilter.all(),
await_full_state=await_full_state,
)
else:
# no events in this room - so presumably no state
state = {}
# (erikj) This should be rarely hit, but we've had some reports that
# we get more state down gappy syncs than we should, so let's add
# some logging.
logger.info(
"Failed to find any events in room %s at %s",
room_id,
stream_position.room_key,
)
return state
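The removed helper above resolves "state at a stream position" as "state after the last event in the room at or before that position", falling back to empty state for an eventless room. The lookup rule in miniature:

    from typing import List, Optional, Tuple

    def last_event_before(events: List[Tuple[int, str]], position: int) -> Optional[str]:
        # events are (stream_ordering, event_id) pairs in stream order.
        candidates = [event_id for ordering, event_id in events if ordering <= position]
        return candidates[-1] if candidates else None

    events = [(1, "$create"), (5, "$join"), (9, "$topic")]
    assert last_event_before(events, 7) == "$join"
    assert last_event_before(events, 0) is None  # no events: presumably no state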
async def compute_summary( async def compute_summary(
self, self,
room_id: str, room_id: str,
@@ -1352,7 +1240,7 @@ class SyncHandler:
await_full_state = True await_full_state = True
lazy_load_members = False lazy_load_members = False
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at( state_at_timeline_end = await self.get_state_at(
room_id, room_id,
stream_position=end_token, stream_position=end_token,
state_filter=state_filter, state_filter=state_filter,
@@ -1436,7 +1324,7 @@ class SyncHandler:
# We need to make sure the first event in our batch points to the # We need to make sure the first event in our batch points to the
# last event in the previous batch. # last event in the previous batch.
last_event_id_prev_batch = ( last_event_id_prev_batch = (
await self.store.get_last_event_id_in_room_before_stream_ordering( await self.store.get_last_event_in_room_before_stream_ordering(
room_id, room_id,
end_token=since_token.room_key, end_token=since_token.room_key,
) )
@@ -1480,13 +1368,11 @@ class SyncHandler:
else: else:
# We can get here if the user has ignored the senders of all # We can get here if the user has ignored the senders of all
# the recent events. # the recent events.
state_at_timeline_start = ( state_at_timeline_start = await self.get_state_at(
await self._state_storage_controller.get_state_ids_at( room_id,
room_id, stream_position=end_token,
stream_position=end_token, state_filter=state_filter,
state_filter=state_filter, await_full_state=await_full_state,
await_full_state=await_full_state,
)
) )
if batch.limited: if batch.limited:
@@ -1504,14 +1390,14 @@ class SyncHandler:
# about them). # about them).
state_filter = StateFilter.all() state_filter = StateFilter.all()
state_at_previous_sync = await self._state_storage_controller.get_state_ids_at( state_at_previous_sync = await self.get_state_at(
room_id, room_id,
stream_position=since_token, stream_position=since_token,
state_filter=state_filter, state_filter=state_filter,
await_full_state=await_full_state, await_full_state=await_full_state,
) )
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at( state_at_timeline_end = await self.get_state_at(
room_id, room_id,
stream_position=end_token, stream_position=end_token,
state_filter=state_filter, state_filter=state_filter,
@@ -1805,96 +1691,6 @@ class SyncHandler:
next_batch=sync_result_builder.now_token, next_batch=sync_result_builder.now_token,
) )
async def generate_e2ee_sync_result(
self,
sync_config: SyncConfig,
since_token: Optional[StreamToken] = None,
) -> E2eeSyncResult:
"""
Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.
This is represented by a `E2eeSyncResult` struct, which is built from small
pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
mutable ("inout") parameter to various helper functions. These retrieve and
process the data which forms the sync body, often writing to the
`sync_result_builder` to store their output.
At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
instance to signify that the sync calculation is complete.
"""
user_id = sync_config.user.to_string()
app_service = self.store.get_app_service_by_user_id(user_id)
if app_service:
# We no longer support AS users using /sync directly.
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
sync_result_builder = await self.get_sync_result_builder(
sync_config,
since_token,
full_state=False,
)
# 1. Calculate `to_device` events
await self._generate_sync_entry_for_to_device(sync_result_builder)
# 2. Calculate `device_lists`
# Device list updates are sent if a since token is provided.
device_lists = DeviceListUpdates()
include_device_list_updates = bool(since_token and since_token.device_list_key)
if include_device_list_updates:
# Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
# is used in calculate_user_changes below.
#
# TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
# figure out the membership changes/derived info needed for
# `_generate_sync_entry_for_device_list()`. In the future, we should try to
# refactor this away.
(
newly_joined_rooms,
newly_left_rooms,
) = await self._generate_sync_entry_for_rooms(sync_result_builder)
# This uses the sync_result_builder.joined which is set in
# `_generate_sync_entry_for_rooms`, if that didn't find any joined
# rooms for some reason it is a no-op.
(
newly_joined_or_invited_or_knocked_users,
newly_left_users,
) = sync_result_builder.calculate_user_changes()
device_lists = await self._generate_sync_entry_for_device_list(
sync_result_builder,
newly_joined_rooms=newly_joined_rooms,
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
newly_left_rooms=newly_left_rooms,
newly_left_users=newly_left_users,
)
# 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
device_id = sync_config.device_id
one_time_keys_count: JsonMapping = {}
unused_fallback_key_types: List[str] = []
if device_id:
# TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
unused_fallback_key_types = list(
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
)
return E2eeSyncResult(
to_device=sync_result_builder.to_device,
device_lists=device_lists,
device_one_time_keys_count=one_time_keys_count,
device_unused_fallback_key_types=unused_fallback_key_types,
next_batch=sync_result_builder.now_token,
)
async def get_sync_result_builder( async def get_sync_result_builder(
self, self,
sync_config: SyncConfig, sync_config: SyncConfig,
@@ -1919,7 +1715,7 @@ class SyncHandler:
""" """
user_id = sync_config.user.to_string() user_id = sync_config.user.to_string()
# Note: we get the user's room list *before* we get the `now_token`; this # Note: we get the user's room list *before* we get the current token; this
# avoids checking back in history if rooms are joined after the token is fetched. # avoids checking back in history if rooms are joined after the token is fetched.
token_before_rooms = self.event_sources.get_current_token() token_before_rooms = self.event_sources.get_current_token()
mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id)) mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
@@ -1931,10 +1727,10 @@ class SyncHandler:
now_token = self.event_sources.get_current_token() now_token = self.event_sources.get_current_token()
log_kv({"now_token": now_token}) log_kv({"now_token": now_token})
# Since we fetched the user's room list before calculating the `now_token` (see # Since we fetched the user's room list before the token, there's a small window
# above), there's a small window during which membership events may have been # during which membership events may have been persisted, so we fetch these now
# persisted, so we fetch these now and modify the joined room list for any # and modify the joined room list for any changes between the get_rooms_for_user
# changes between the get_rooms_for_user call and the get_current_token call. # call and the get_current_token call.
membership_change_events = [] membership_change_events = []
if since_token: if since_token:
membership_change_events = await self.store.get_membership_changes_for_user( membership_change_events = await self.store.get_membership_changes_for_user(
@@ -1944,19 +1740,16 @@ class SyncHandler:
self.rooms_to_exclude_globally, self.rooms_to_exclude_globally,
) )
last_membership_change_by_room_id: Dict[str, EventBase] = {} mem_last_change_by_room_id: Dict[str, EventBase] = {}
for event in membership_change_events: for event in membership_change_events:
last_membership_change_by_room_id[event.room_id] = event mem_last_change_by_room_id[event.room_id] = event
# For the latest membership event in each room found, add/remove the room ID # For the latest membership event in each room found, add/remove the room ID
# from the joined room list accordingly. In this case we only care if the # from the joined room list accordingly. In this case we only care if the
# latest change is JOIN. # latest change is JOIN.
for room_id, event in last_membership_change_by_room_id.items(): for room_id, event in mem_last_change_by_room_id.items():
assert event.internal_metadata.stream_ordering assert event.internal_metadata.stream_ordering
# As a shortcut, skip any events that happened before we got our
# `get_rooms_for_user()` snapshot (any changes are already represented
# in that list).
if ( if (
event.internal_metadata.stream_ordering event.internal_metadata.stream_ordering
< token_before_rooms.room_key.stream < token_before_rooms.room_key.stream
@@ -2096,7 +1889,7 @@ class SyncHandler:
users_that_have_changed = ( users_that_have_changed = (
await self._device_handler.get_device_changes_in_shared_rooms( await self._device_handler.get_device_changes_in_shared_rooms(
user_id, user_id,
joined_room_ids, sync_result_builder.joined_room_ids,
from_token=since_token, from_token=since_token,
now_token=sync_result_builder.now_token, now_token=sync_result_builder.now_token,
) )
@@ -2270,11 +2063,7 @@ class SyncHandler:
user=user, user=user,
from_key=presence_key, from_key=presence_key,
is_guest=sync_config.is_guest, is_guest=sync_config.is_guest,
include_offline=( include_offline=include_offline,
True
if self.hs_config.server.presence_include_offline_users_on_sync
else include_offline
),
) )
assert presence_key assert presence_key
sync_result_builder.now_token = now_token.copy_and_replace( sync_result_builder.now_token = now_token.copy_and_replace(
@@ -2514,7 +2303,7 @@ class SyncHandler:
continue continue
if room_id in sync_result_builder.joined_room_ids or has_join: if room_id in sync_result_builder.joined_room_ids or has_join:
old_state_ids = await self._state_storage_controller.get_state_ids_at( old_state_ids = await self.get_state_at(
room_id, room_id,
since_token, since_token,
state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]), state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2544,14 +2333,12 @@ class SyncHandler:
newly_left_rooms.append(room_id) newly_left_rooms.append(room_id)
else: else:
if not old_state_ids: if not old_state_ids:
old_state_ids = ( old_state_ids = await self.get_state_at(
await self._state_storage_controller.get_state_ids_at( room_id,
room_id, since_token,
since_token, state_filter=StateFilter.from_types(
state_filter=StateFilter.from_types( [(EventTypes.Member, user_id)]
[(EventTypes.Member, user_id)] ),
),
)
) )
old_mem_ev_id = old_state_ids.get( old_mem_ev_id = old_state_ids.get(
(EventTypes.Member, user_id), None (EventTypes.Member, user_id), None
@@ -2756,7 +2543,7 @@ class SyncHandler:
continue continue
leave_token = now_token.copy_and_replace( leave_token = now_token.copy_and_replace(
StreamKeyType.ROOM, RoomStreamToken(stream=event.event_pos.stream) StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering)
) )
room_entries.append( room_entries.append(
RoomSyncResultBuilder( RoomSyncResultBuilder(
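The SyncHandler hunks above keep circling one race: the joined-room list is snapshotted *before* the sync token is minted, so any membership event persisted in between has to be replayed on top of the snapshot, keeping only the latest change per room. A minimal sketch of that reconciliation step (the event type and names here are illustrative, not Synapse's actual classes):

from dataclasses import dataclass
from typing import Dict, Set


@dataclass
class MembershipEvent:
    room_id: str
    membership: str  # e.g. "join" or "leave"
    stream_ordering: int


def reconcile_joined_rooms(
    snapshot: Set[str],
    snapshot_stream: int,
    membership_changes: Dict[str, MembershipEvent],
) -> Set[str]:
    """Patch a joined-room snapshot taken *before* the sync token.

    Only the latest membership change per room matters, and changes that
    happened before the snapshot was taken are already reflected in it.
    """
    joined = set(snapshot)
    for room_id, event in membership_changes.items():
        if event.stream_ordering < snapshot_stream:
            continue  # already represented in the snapshot
        if event.membership == "join":
            joined.add(room_id)
        else:
            joined.discard(room_id)
    return joined


joined = reconcile_joined_rooms(
    snapshot={"!a:x"},
    snapshot_stream=10,
    membership_changes={"!b:x": MembershipEvent("!b:x", "join", 12)},
)
print(joined)  # {'!a:x', '!b:x'}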

@@ -477,9 +477,9 @@ class TypingWriterHandler(FollowerTypingHandler):

         rows = []
         for room_id in changed_rooms:
-            serial = self._room_serials.get(room_id)
-            if serial and last_id < serial <= current_id:
-                typing = self._room_typing.get(room_id, set())
+            serial = self._room_serials[room_id]
+            if last_id < serial <= current_id:
+                typing = self._room_typing[room_id]
                 rows.append((serial, [room_id, list(typing)]))

         rows.sort()
@@ -565,12 +565,7 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
         room_ids: Iterable[str],
         is_guest: bool,
         explicit_room_id: Optional[str] = None,
-        to_key: Optional[int] = None,
     ) -> Tuple[List[JsonMapping], int]:
-        """
-        Find typing notifications for given rooms (> `from_token` and <= `to_token`)
-        """
         with Measure(self.clock, "typing.get_new_events"):
             from_key = int(from_key)
             handler = self.get_typing_handler()
@@ -579,9 +574,7 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
             for room_id in room_ids:
                 if room_id not in handler._room_serials:
                     continue
-                if handler._room_serials[room_id] <= from_key or (
-                    to_key is not None and handler._room_serials[room_id] > to_key
-                ):
+                if handler._room_serials[room_id] <= from_key:
                     continue

                 events.append(self._make_event_for(room_id))
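The first and last hunks above both trim typing notifications by stream serial: a room is reported when its serial falls in a half-open window bounded below by the reader's token (and, on the side being removed, above by an optional `to_key`). A rough sketch of the shared windowing logic, with a plain dict standing in for `_room_serials`:

from typing import Dict, List, Tuple


def typing_rows_between(
    room_serials: Dict[str, int], last_id: int, current_id: int
) -> List[Tuple[int, str]]:
    """Return (serial, room_id) pairs whose serial falls in (last_id, current_id]."""
    rows = [
        (serial, room_id)
        for room_id, serial in room_serials.items()
        if last_id < serial <= current_id
    ]
    rows.sort()  # replication consumers expect rows in stream order
    return rows


print(typing_rows_between({"!a:x": 3, "!b:x": 7}, last_id=3, current_id=10))
# -> [(7, '!b:x')]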

@@ -35,8 +35,6 @@ from typing import (
     Union,
 )

-import attr
-import multipart
 import treq
 from canonicaljson import encode_canonical_json
 from netaddr import AddrFormatError, IPAddress, IPSet
@@ -1008,130 +1006,6 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
         self._maybe_fail()

-
-@attr.s(auto_attribs=True, slots=True)
-class MultipartResponse:
-    """
-    A small class to hold parsed values of a multipart response.
-    """
-
-    json: bytes = b"{}"
-    length: Optional[int] = None
-    content_type: Optional[bytes] = None
-    disposition: Optional[bytes] = None
-    url: Optional[bytes] = None
-
-
-class _MultipartParserProtocol(protocol.Protocol):
-    """
-    Protocol to read and parse a MSC3916 multipart/mixed response
-    """
-
-    transport: Optional[ITCPTransport] = None
-
-    def __init__(
-        self,
-        stream: ByteWriteable,
-        deferred: defer.Deferred,
-        boundary: str,
-        max_length: Optional[int],
-    ) -> None:
-        self.stream = stream
-        self.deferred = deferred
-        self.boundary = boundary
-        self.max_length = max_length
-        self.parser = None
-        self.multipart_response = MultipartResponse()
-        self.has_redirect = False
-        self.in_json = False
-        self.json_done = False
-        self.file_length = 0
-        self.total_length = 0
-        self.in_disposition = False
-        self.in_content_type = False
-
-    def dataReceived(self, incoming_data: bytes) -> None:
-        if self.deferred.called:
-            return
-
-        # we don't have a parser yet, instantiate it
-        if not self.parser:
-
-            def on_header_field(data: bytes, start: int, end: int) -> None:
-                if data[start:end] == b"Location":
-                    self.has_redirect = True
-                if data[start:end] == b"Content-Disposition":
-                    self.in_disposition = True
-                if data[start:end] == b"Content-Type":
-                    self.in_content_type = True
-
-            def on_header_value(data: bytes, start: int, end: int) -> None:
-                # the first header should be content-type for application/json
-                if not self.in_json and not self.json_done:
-                    assert data[start:end] == b"application/json"
-                    self.in_json = True
-                elif self.has_redirect:
-                    self.multipart_response.url = data[start:end]
-                elif self.in_content_type:
-                    self.multipart_response.content_type = data[start:end]
-                    self.in_content_type = False
-                elif self.in_disposition:
-                    self.multipart_response.disposition = data[start:end]
-                    self.in_disposition = False
-
-            def on_part_data(data: bytes, start: int, end: int) -> None:
-                # we've seen json header but haven't written the json data
-                if self.in_json and not self.json_done:
-                    self.multipart_response.json = data[start:end]
-                    self.json_done = True
-                # we have a redirect header rather than a file, and have already captured it
-                elif self.has_redirect:
-                    return
-                # otherwise we are in the file part
-                else:
-                    logger.info("Writing multipart file data to stream")
-                    try:
-                        self.stream.write(data[start:end])
-                    except Exception as e:
-                        logger.warning(
-                            f"Exception encountered writing file data to stream: {e}"
-                        )
-                        self.deferred.errback()
-                    self.file_length += end - start
-
-            callbacks = {
-                "on_header_field": on_header_field,
-                "on_header_value": on_header_value,
-                "on_part_data": on_part_data,
-            }
-            self.parser = multipart.MultipartParser(self.boundary, callbacks)
-
-        self.total_length += len(incoming_data)
-        if self.max_length is not None and self.total_length >= self.max_length:
-            self.deferred.errback(BodyExceededMaxSize())
-            # Close the connection (forcefully) since all the data will get
-            # discarded anyway.
-            assert self.transport is not None
-            self.transport.abortConnection()
-
-        try:
-            self.parser.write(incoming_data)  # type: ignore[attr-defined]
-        except Exception as e:
-            logger.warning(f"Exception writing to multipart parser: {e}")
-            self.deferred.errback()
-            return
-
-    def connectionLost(self, reason: Failure = connectionDone) -> None:
-        # If the maximum size was already exceeded, there's nothing to do.
-        if self.deferred.called:
-            return
-
-        if reason.check(ResponseDone):
-            self.multipart_response.length = self.file_length
-            self.deferred.callback(self.multipart_response)
-        else:
-            self.deferred.errback(reason)
-
-
 class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""
@@ -1217,32 +1091,6 @@ def read_body_with_max_size(
     return d

-
-def read_multipart_response(
-    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
-) -> "defer.Deferred[MultipartResponse]":
-    """
-    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
-    the stream passed in and returning a deferred resolving to a MultipartResponse
-
-    Args:
-        response: The HTTP response to read from.
-        stream: The file-object to write to.
-        boundary: the multipart/mixed boundary string
-        max_length: maximum allowable length of the response
-    """
-    d: defer.Deferred[MultipartResponse] = defer.Deferred()
-
-    # If the Content-Length header gives a size larger than the maximum allowed
-    # size, do not bother downloading the body.
-    if max_length is not None and response.length != UNKNOWN_LENGTH:
-        if response.length > max_length:
-            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
-            return d
-
-    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
-
-    return d
-
-
 def encode_query_args(args: Optional[QueryParams]) -> bytes:
     """
     Encodes a map of query arguments to bytes which can be appended to a URL.
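One guard in the removed `read_multipart_response` is worth spelling out: when the advertised Content-Length already exceeds the cap, the body is never parsed at all, only drained by a discarding protocol so the connection can complete. Schematically (plain Python, with a stand-in for Twisted's `UNKNOWN_LENGTH` sentinel):

from typing import Optional

UNKNOWN_LENGTH = object()  # stand-in for twisted.web.iweb.UNKNOWN_LENGTH


def choose_body_handler(advertised_length, max_length: Optional[int]) -> str:
    # Advertised size already over the cap: don't parse, just discard.
    if (
        max_length is not None
        and advertised_length is not UNKNOWN_LENGTH
        and advertised_length > max_length
    ):
        return "discard"
    # Otherwise parse, enforcing the cap as bytes actually arrive.
    return "parse"


assert choose_body_handler(10, max_length=5) == "discard"
assert choose_body_handler(UNKNOWN_LENGTH, max_length=5) == "parse"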

@@ -57,7 +57,7 @@ from twisted.internet.interfaces import IReactorTime
 from twisted.internet.task import Cooperator
 from twisted.web.client import ResponseFailed
 from twisted.web.http_headers import Headers
-from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse
+from twisted.web.iweb import IAgent, IBodyProducer, IResponse

 import synapse.metrics
 import synapse.util.retryutils
@@ -68,18 +68,15 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
-from synapse.api.ratelimiting import Ratelimiter
 from synapse.crypto.context_factory import FederationPolicyForHTTPS
 from synapse.http import QuieterFileBodyProducer
 from synapse.http.client import (
     BlocklistingAgentWrapper,
     BodyExceededMaxSize,
     ByteWriteable,
-    SimpleHttpClient,
     _make_scheduler,
     encode_query_args,
     read_body_with_max_size,
-    read_multipart_response,
 )
 from synapse.http.connectproxyclient import BearerProxyCredentials
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
@@ -90,7 +87,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.types import JsonDict
 from synapse.util import json_decoder
-from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
+from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
 from synapse.util.metrics import Measure
 from synapse.util.stringutils import parse_and_validate_server_name
@@ -468,15 +465,6 @@ class MatrixFederationHttpClient:

         self._sleeper = AwakenableSleeper(self.reactor)

-        self._simple_http_client = SimpleHttpClient(
-            hs,
-            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
-            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
-            use_proxy=True,
-        )
-
-        self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6)
-
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""
@@ -1423,11 +1411,9 @@
         destination: str,
         path: str,
         output_stream: BinaryIO,
-        download_ratelimiter: Ratelimiter,
-        ip_address: str,
-        max_size: int,
         args: Optional[QueryParams] = None,
         retry_on_dns_fail: bool = True,
+        max_size: Optional[int] = None,
         ignore_backoff: bool = False,
         follow_redirects: bool = False,
     ) -> Tuple[int, Dict[bytes, List[bytes]]]:
@@ -1436,10 +1422,6 @@
             destination: The remote server to send the HTTP request to.
             path: The HTTP path to GET.
             output_stream: File to write the response body to.
-            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
-                requester IP
-            ip_address: IP address of the requester
-            max_size: maximum allowable size in bytes of the file
             args: Optional dictionary used to create the query string.
             ignore_backoff: true to ignore the historical backoff data
                 and try the request anyway.
@@ -1459,27 +1441,11 @@
                 federation whitelist
             RequestSendFailed: If there were problems connecting to the
                 remote, due to e.g. DNS failures, connection timeouts etc.
-            SynapseError: If the requested file exceeds ratelimits
         """
         request = MatrixFederationRequest(
             method="GET", destination=destination, path=path, query=args
         )

-        # check for a minimum balance of 1MiB in ratelimiter before initiating request
-        send_req, _ = await download_ratelimiter.can_do_action(
-            requester=None, key=ip_address, n_actions=1048576, update=False
-        )
-
-        if not send_req:
-            msg = "Requested file size exceeds ratelimits"
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
-            )
-            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
-
         response = await self._send_request(
             request,
             retry_on_dns_fail=retry_on_dns_fail,
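The ratelimiter block being removed here is a two-phase pattern: a non-mutating probe (`update=False`) checks that the requester's per-IP budget has at least 1 MiB of headroom before any bytes are sent, and the real cost is only charged later, once the size is known. A toy bucket showing the probe-then-charge shape (not Synapse's `Ratelimiter`):

from typing import Dict


class ByteBucket:
    """Toy per-key byte budget, illustrating probe-then-charge ratelimiting."""

    def __init__(self, budget: int) -> None:
        self.budget = budget
        self.used: Dict[str, int] = {}

    def can_do_action(self, key: str, n_actions: int, update: bool = True) -> bool:
        allowed = self.used.get(key, 0) + n_actions <= self.budget
        if allowed and update:
            self.used[key] = self.used.get(key, 0) + n_actions
        return allowed


bucket = ByteBucket(budget=100 * 1024 * 1024)
ip = "203.0.113.7"

# Probe (update=False): refuse before sending the request if less than
# 1 MiB of budget remains, without actually spending anything yet.
if not bucket.can_do_action(ip, 1048576, update=False):
    raise RuntimeError("Requested file size exceeds ratelimits")

# Once the real size is known, charge the actual cost.
bucket.can_do_action(ip, n_actions=5 * 1024 * 1024)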
@@ -1488,46 +1454,13 @@
         )

         headers = dict(response.headers.getAllRawHeaders())

-        expected_size = response.length
-        if expected_size == UNKNOWN_LENGTH:
-            expected_size = max_size
-        else:
-            if int(expected_size) > max_size:
-                msg = "Requested file is too large > %r bytes" % (max_size,)
-                logger.warning(
-                    "{%s} [%s] %s",
-                    request.txn_id,
-                    request.destination,
-                    msg,
-                )
-                raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
-
-            read_body, _ = await download_ratelimiter.can_do_action(
-                requester=None,
-                key=ip_address,
-                n_actions=expected_size,
-            )
-            if not read_body:
-                msg = "Requested file size exceeds ratelimits"
-                logger.warning(
-                    "{%s} [%s] %s",
-                    request.txn_id,
-                    request.destination,
-                    msg,
-                )
-                raise SynapseError(
-                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
-                )
-
         try:
-            async with self.remote_download_linearizer.queue(ip_address):
-                # add a byte of headroom to max size as function errs at >=
-                d = read_body_with_max_size(response, output_stream, expected_size + 1)
-                d.addTimeout(self.default_timeout_seconds, self.reactor)
-                length = await make_deferred_yieldable(d)
+            d = read_body_with_max_size(response, output_stream, max_size)
+            d.addTimeout(self.default_timeout_seconds, self.reactor)
+            length = await make_deferred_yieldable(d)
         except BodyExceededMaxSize:
-            msg = "Requested file is too large > %r bytes" % (expected_size,)
+            msg = "Requested file is too large > %r bytes" % (max_size,)
             logger.warning(
                 "{%s} [%s] %s",
                 request.txn_id,
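Note the off-by-one on the removed side of this hunk: `read_body_with_max_size` fails as soon as the body reaches its limit (`>=`), so the caller passes `expected_size + 1` to accept a body of exactly `expected_size` bytes. The same guard in isolation, with a hypothetical chunk reader:

def read_with_limit(chunks, expected_size: int) -> bytes:
    # Mirror of the removed comment: the reader errs at >= its limit, so use
    # expected_size + 1 to let a body of exactly expected_size bytes through.
    limit = expected_size + 1
    body = b""
    for chunk in chunks:
        body += chunk
        if len(body) >= limit:
            raise ValueError("BodyExceededMaxSize")
    return body


assert read_with_limit([b"a" * 512, b"a" * 512], expected_size=1024) == b"a" * 1024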
@@ -1571,214 +1504,8 @@
             request.method,
             request.uri.decode("ascii"),
         )

-        # if we didn't know the length upfront, decrement the actual size from ratelimiter
-        if response.length == UNKNOWN_LENGTH:
-            download_ratelimiter.record_action(
-                requester=None, key=ip_address, n_actions=length
-            )
-
         return length, headers

-    async def federation_get_file(
-        self,
-        destination: str,
-        path: str,
-        output_stream: BinaryIO,
-        download_ratelimiter: Ratelimiter,
-        ip_address: str,
-        max_size: int,
-        args: Optional[QueryParams] = None,
-        retry_on_dns_fail: bool = True,
-        ignore_backoff: bool = False,
-    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
-        """GETs a file from a given homeserver over the federation /download endpoint
-
-        Args:
-            destination: The remote server to send the HTTP request to.
-            path: The HTTP path to GET.
-            output_stream: File to write the response body to.
-            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
-                requester IP
-            ip_address: IP address of the requester
-            max_size: maximum allowable size in bytes of the file
-            args: Optional dictionary used to create the query string.
-            ignore_backoff: true to ignore the historical backoff data
-                and try the request anyway.
-
-        Returns:
-            Resolves to an (int, dict, bytes) tuple of
-            the file length, a dict of the response headers, and the file json
-
-        Raises:
-            HttpResponseException: If we get an HTTP response code >= 300
-                (except 429).
-            NotRetryingDestination: If we are not yet ready to retry this
-                server.
-            FederationDeniedError: If this destination is not on our
-                federation whitelist
-            RequestSendFailed: If there were problems connecting to the
-                remote, due to e.g. DNS failures, connection timeouts etc.
-            SynapseError: If the requested file exceeds ratelimits or the response from the
-                remote server is not a multipart response
-            AssertionError: if the resolved multipart response's length is None
-        """
-        request = MatrixFederationRequest(
-            method="GET", destination=destination, path=path, query=args
-        )
-
-        # check for a minimum balance of 1MiB in ratelimiter before initiating request
-        send_req, _ = await download_ratelimiter.can_do_action(
-            requester=None, key=ip_address, n_actions=1048576, update=False
-        )
-
-        if not send_req:
-            msg = "Requested file size exceeds ratelimits"
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
-            )
-            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
-
-        response = await self._send_request(
-            request,
-            retry_on_dns_fail=retry_on_dns_fail,
-            ignore_backoff=ignore_backoff,
-        )
-
-        headers = dict(response.headers.getAllRawHeaders())
-
-        expected_size = response.length
-        if expected_size == UNKNOWN_LENGTH:
-            expected_size = max_size
-        else:
-            if int(expected_size) > max_size:
-                msg = "Requested file is too large > %r bytes" % (max_size,)
-                logger.warning(
-                    "{%s} [%s] %s",
-                    request.txn_id,
-                    request.destination,
-                    msg,
-                )
-                raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
-
-            read_body, _ = await download_ratelimiter.can_do_action(
-                requester=None,
-                key=ip_address,
-                n_actions=expected_size,
-            )
-            if not read_body:
-                msg = "Requested file size exceeds ratelimits"
-                logger.warning(
-                    "{%s} [%s] %s",
-                    request.txn_id,
-                    request.destination,
-                    msg,
-                )
-                raise SynapseError(
-                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
-                )
-
-        # this should be a multipart/mixed response with the boundary string in the header
-        try:
-            raw_content_type = headers.get(b"Content-Type")
-            assert raw_content_type is not None
-            content_type = raw_content_type[0].decode("UTF-8")
-            content_type_parts = content_type.split("boundary=")
-            boundary = content_type_parts[1]
-        except Exception:
-            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
-            )
-            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
-
-        try:
-            async with self.remote_download_linearizer.queue(ip_address):
-                # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
-                deferred = read_multipart_response(
-                    response, output_stream, boundary, expected_size + 1
-                )
-                deferred.addTimeout(self.default_timeout_seconds, self.reactor)
-        except BodyExceededMaxSize:
-            msg = "Requested file is too large > %r bytes" % (expected_size,)
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
-            )
-            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
-        except defer.TimeoutError as e:
-            logger.warning(
-                "{%s} [%s] Timed out reading response - %s %s",
-                request.txn_id,
-                request.destination,
-                request.method,
-                request.uri.decode("ascii"),
-            )
-            raise RequestSendFailed(e, can_retry=True) from e
-        except ResponseFailed as e:
-            logger.warning(
-                "{%s} [%s] Failed to read response - %s %s",
-                request.txn_id,
-                request.destination,
-                request.method,
-                request.uri.decode("ascii"),
-            )
-            raise RequestSendFailed(e, can_retry=True) from e
-        except Exception as e:
-            logger.warning(
-                "{%s} [%s] Error reading response: %s",
-                request.txn_id,
-                request.destination,
-                e,
-            )
-            raise
-
-        multipart_response = await make_deferred_yieldable(deferred)
-
-        if not multipart_response.url:
-            assert multipart_response.length is not None
-            length = multipart_response.length
-            headers[b"Content-Type"] = [multipart_response.content_type]
-            headers[b"Content-Disposition"] = [multipart_response.disposition]
-        # the response contained a redirect url to download the file from
-        else:
-            str_url = multipart_response.url.decode("utf-8")
-            logger.info(
-                "{%s} [%s] File download redirected, now downloading from: %s",
-                request.txn_id,
-                request.destination,
-                str_url,
-            )
-            length, headers, _, _ = await self._simple_http_client.get_file(
-                str_url, output_stream, expected_size
-            )
-
-        logger.info(
-            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
-            request.txn_id,
-            request.destination,
-            response.code,
-            response.phrase.decode("ascii", errors="replace"),
-            length,
-            request.method,
-            request.uri.decode("ascii"),
-        )
-
-        # if we didn't know the length upfront, decrement the actual size from ratelimiter
-        if response.length == UNKNOWN_LENGTH:
-            download_ratelimiter.record_action(
-                requester=None, key=ip_address, n_actions=length
-            )
-
-        return length, headers, multipart_response.json
-

 def _flatten_response_never_received(e: BaseException) -> str:
     if hasattr(e, "reasons"):
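Before handing the response to the parser, the removed `federation_get_file` digs the boundary out of the Content-Type header with a bare string split. That header surgery standalone (the example header value is hypothetical):

def extract_boundary(raw_content_type: bytes) -> str:
    """Pull the boundary out of a multipart/mixed Content-Type header value."""
    content_type = raw_content_type.decode("UTF-8")
    parts = content_type.split("boundary=")
    if len(parts) < 2:
        raise ValueError(
            "expected Content-Type of multipart/mixed with a boundary present"
        )
    return parts[1]


print(extract_boundary(b"multipart/mixed; boundary=6067d4698f8d40a0a794ea7d"))
# -> 6067d4698f8d40a0a794ea7d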

@@ -62,15 +62,6 @@ HOP_BY_HOP_HEADERS = {
     "Upgrade",
 }

-if hasattr(Headers, "_canonicalNameCaps"):
-    # Twisted < 24.7.0rc1
-    _canonicalHeaderName = Headers()._canonicalNameCaps  # type: ignore[attr-defined]
-else:
-    # Twisted >= 24.7.0rc1
-    # But note that `_encodeName` still exists on prior versions,
-    # it just encodes differently
-    _canonicalHeaderName = Headers()._encodeName
-

 def parse_connection_header_value(
     connection_header_value: Optional[bytes],
@@ -94,10 +85,11 @@
     The set of header names that should not be copied over from the remote response.
     The keys are capitalized in canonical capitalization.
     """
+    headers = Headers()
     extra_headers_to_remove: Set[str] = set()
     if connection_header_value:
         extra_headers_to_remove = {
-            _canonicalHeaderName(connection_option.strip()).decode("ascii")
+            headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
             for connection_option in connection_header_value.split(b",")
         }
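The nine lines removed at the top of this hunk are feature detection: probe once, at import time, for the private method the installed Twisted provides, and bind whichever exists. The same shim in isolation, relying (as the removed code did) on Twisted's private `Headers` methods:

from twisted.web.http_headers import Headers

# Probe once at import time rather than on every call.
if hasattr(Headers, "_canonicalNameCaps"):
    _canonical_header_name = Headers()._canonicalNameCaps  # Twisted < 24.7.0rc1
else:
    _canonical_header_name = Headers()._encodeName  # Twisted >= 24.7.0rc1

# Both spellings canonicalise on the versions they are bound for:
print(_canonical_header_name(b"connection"))  # expected: b"Connection"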

@@ -74,7 +74,6 @@ from synapse.api.errors import (
 from synapse.config.homeserver import HomeServerConfig
 from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
 from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
-from synapse.types import ISynapseReactor
 from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
 from synapse.util.cancellation import is_function_cancellable
@@ -869,8 +868,7 @@ async def _async_write_json_to_request_in_thread(

     with start_active_span("encode_json_response"):
         span = active_span()
-        reactor: ISynapseReactor = request.reactor  # type: ignore
-        json_str = await defer_to_thread(reactor, encode, span)
+        json_str = await defer_to_thread(request.reactor, encode, span)

     _write_bytes_to_request(request, json_str)
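Both sides of the final hunk do the same thing and differ only in typing: JSON encoding of the response is pushed off the reactor thread with `defer_to_thread`. A minimal Twisted sketch of that offload pattern, using the stock `deferToThread` rather than Synapse's wrapper:

import json

from twisted.internet import defer, task
from twisted.internet.threads import deferToThread


@defer.inlineCallbacks
def encode_off_reactor(payload):
    # The CPU-heavy encode runs in a threadpool; the reactor stays responsive.
    json_str = yield deferToThread(json.dumps, payload)
    print(len(json_str), "bytes encoded off the reactor thread")


task.react(lambda _reactor: encode_off_reactor({"k": list(range(1000))}))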

Some files were not shown because too many files have changed in this diff.