Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: erikj/dock... to travis/mat... (155 commits)
Commit SHA1s (author and date columns were empty in the capture):

24edd4cd2c
005b82f4bc
20de685a4b
8e9e6f1a0a
57538eb4d9
45b35f8eae
2ec257d608
daeaeb2a7b
7786ae7e1c
22aeb78b77
a9d2e40ea4
0c4f7a3d16
75b788f49f
7be03d854b
fa91655805
0d2b75cf92
ccce858c4a
99c107920d
1609855ff8
8f890447b0
b905ae27ca
1ce59d7ba0
b3b793786c
9c8f1a6d41
5b5280e3e5
635e3927d2
a1b8897668
76b9f14c0a
1eccbfb82f
2f5a77efae
b11f5c984b
27756c9fdf
cc5e5893fe
7c169f4d2c
f75da9cc53
75c19bf57a
b924a8e1a9
a8dcd686fb
315b8d2032
9f47513458
ef7fbdfebd
9cf0ef9c70
a023538822
f79dbd0f61
c89fea3fd1
554a92601a
a98cb87bee
6e8af83193
805e6c9a8f
3c61ddbbc9
ae4c236a6d
930a64b6c1
7a11c0ac4f
cf711ac03c
700d2cc4a0
1e74b50dc6
7c2d8f1f01
118b734081
7a6186b888
452a59f887
adeedb7b7c
7c5fb13f7b
f8d57ce656
13ed84c573
4243c1f074
3239b7459c
c99203d98c
9104a9f0d0
a412a5829d
7ef89b985d
bdf82efea5
afaf2d9388
199223062a
97c3d98816
fa3adc896a
79767a1108
4af654f0da
1c7d85fdfe
5a65e8a0d1
088992a484
d17d931a53
334123f0cd
d8e81f67eb
19a3d5b606
52813a8d94
a5485437cf
e5b8a3e37f
e88332b5f4
edfb7aad3a
f983a77ab0
12d7303707
a3cb244755
3aae60f17b
2c36a679ae
c12ee0d5ba
8aaff851b1
8c58eb7f17
ebdce69f6a
c6eb99c878
5db3eec5bc
f1c4dfb08b
0edf1cacf7
d0f90bd04e
0248ed70a9
e6816babf6
a8069e9739
863578bfcf
9e59d18022
491365f199
dad1559721
8c4937b216
b84e31375b
06953bc193
265ee88f34
ab94bce02c
17d6c28285
4a7c58642c
ce9385819b
a963f579de
3f06bbc0ac
fcbc79bb87
aabf577166
7d8f0ef351
eab0b548e4
81cef38d4b
e2f8476044
18c1196893
8a3270075b
f458dff16d
6b709c512d
5c2a837e3c
64f5a4a353
7dd14fadb1
5624c8b961
4e3868dc46
d16910ca02
225f378ffa
8bd9ff0783
466f344547
726006cdf2
967b6948b0
d7198dfb95
94ef2f4f5d
bb5a692946
ad179b0136
5147ce294a
f35bc08d39
f2616edb73
86a2a0258f
0893ee9af8
887f773472
9edb725ebc
c97251d5ba
7e2412265d
7ef00b7628
.github/ISSUE_TEMPLATE.md (vendored, 2 changes)

@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
 
-**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
+**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 2 changes)

@@ -7,7 +7,7 @@ body:
       **THIS IS NOT A SUPPORT CHANNEL!**
       **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
 
-      If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+      If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
 
       This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.
.github/workflows/docker.yml (vendored, 2 changes)

@@ -72,7 +72,7 @@ jobs:
 
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           push: true
           labels: |
.github/workflows/docs-pr-netlify.yaml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
+        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
.github/workflows/release-artifacts.yml (vendored, 8 changes)

@@ -102,7 +102,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-11]
+        os: [ubuntu-20.04, macos-12]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,9 +112,9 @@ jobs:
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-11"
+            os: "macos-12"
           # Don't build aarch64 wheels on mac.
-          - os: "macos-11"
+          - os: "macos-12"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
@@ -130,7 +130,7 @@ jobs:
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.16.2
+        run: python -m pip install cibuildwheel==2.19.1
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
.github/workflows/tests.yml (vendored, 25 changes)

@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter
@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
 
+          linting_readme:
+            - 'README.rst'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes
@@ -135,7 +139,7 @@ jobs:
 
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+        run: poetry run ruff check --quiet .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -269,6 +273,20 @@ jobs:
 
       - run: cargo fmt --check
 
+  # This is to detect issues with the rst file, which can otherwise cause issues
+  # when uploading packages to PyPi.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
@@ -284,6 +302,7 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
       - uses: matrix-org/done-action@v2
@@ -301,6 +320,7 @@ jobs:
           lint-clippy
           lint-clippy-nightly
           lint-rustfmt
+          lint-readme
 
 
   calculate-test-jobs:
@@ -479,6 +499,9 @@ jobs:
     volumes:
       - ${{ github.workspace }}:/src
     env:
+      # If this is a pull request to a release branch, use that branch as default branch for sytest, else use develop
+      # This works because the release script always create a branch on the sytest repo with the same name as the release branch
+      SYTEST_DEFAULT_BRANCH: ${{ startsWith(github.base_ref, 'release-') && github.base_ref || 'develop' }}
       SYTEST_BRANCH: ${{ github.head_ref }}
       POSTGRES: ${{ matrix.job.postgres && 1}}
       MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
CHANGES.md (205 changes)

@@ -1,3 +1,208 @@
+# Synapse 1.110.0 (2024-07-03)
+
+No significant changes since 1.110.0rc3.
+
+
+
+
+# Synapse 1.110.0rc3 (2024-07-02)
+
+### Bugfixes
+
+- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))
+
+### Internal Changes
+
+- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
+- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))
+
+
+
+
+# Synapse 1.110.0rc2 (2024-06-26)
+
+### Internal Changes
+
+- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
+
+
+
+
+# Synapse 1.110.0rc1 (2024-06-26)
+
+### Features
+
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
+- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
+- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
+- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
+- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
+- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
+- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
+- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
+- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
+- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
+- `register_new_matrix_user` now supports a --password-file flag, which
+  is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294))
+- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
+  This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
+- Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
+- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+
+### Bugfixes
+
+- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
+- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
+- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
+- Fix edge case in `/sync` returning the wrong the state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
+- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
+- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))
+
+### Improved Documentation
+
+- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
+- Update header in the README to visually fix the the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
+- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
+- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
+- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))
+
+### Internal Changes
+
+- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
+- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
+- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
+- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
+- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
+- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
+- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
+- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
+- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
+- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
+- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
+- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))
+
+### Updates to locked dependencies
+
+* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
+* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
+* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
+* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
+* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
+* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
+* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
+* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
+* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
+* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
+* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
+* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
+* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
+* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
+* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
+* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
+* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
+* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
+* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
+* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
+* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
+* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))
+
+
+
+
+# Synapse 1.109.0 (2024-06-18)
+
+### Internal Changes
+
+- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))
+
+
+
+
+# Synapse 1.109.0rc3 (2024-06-17)
+
+### Bugfixes
+
+- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309))
+
+### Internal Changes
+
+- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306))
+
+
+
+
+# Synapse 1.109.0rc2 (2024-06-11)
+
+### Bugfixes
+
+- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
+
+
+
+
+# Synapse 1.109.0rc1 (2024-06-04)
+
+### Features
+
+- Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
+- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
+- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))
+
+### Bugfixes
+
+- Fix deduplicating of membership events to not create unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
+- Fix bug where duplicate events could be sent down sync when using workers that are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
+- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
+- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
+- Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
+- Fix bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))
+
+### Improved Documentation
+
+- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))
+
+### Internal Changes
+
+- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
+- Log exceptions when failing to auto-join new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
+- Reduce work of calculating outbound device lists updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
+- Improve performance of calculating device lists changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
+- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
+- Replaces all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
+- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
+- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
+- Clean out invalid destinations from `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
+- Stop logging errors when receiving invalid User IDs in key querys requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
+* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
+* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
+* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
+* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
+* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
+* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
+* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
+* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
+* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
+* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
+* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
+* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
+* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))
+
 # Synapse 1.108.0 (2024-05-28)
 
 No significant changes since 1.108.0rc1.
 
 
 
 
 # Synapse 1.108.0rc1 (2024-05-21)
 
 ### Features
Cargo.lock (generated, 24 changes)

@@ -212,9 +212,9 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 
 [[package]]
 name = "libc"
@@ -234,9 +234,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "memchr"
@@ -444,9 +444,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.10.4"
+version = "1.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
+checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "serde"
-version = "1.0.202"
+version = "1.0.203"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
+checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.202"
+version = "1.0.203"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
+checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,9 +505,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.119"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0"
 dependencies = [
  "itoa",
  "ryu",
README.rst (71 changes)

@@ -1,21 +1,34 @@
-=========================================================================
-Synapse |support| |development| |documentation| |license| |pypi| |python|
-=========================================================================
+.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+  :height: 60px
 
-Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
-maintained by the Matrix.org Foundation. We began rapid development in 2014,
-reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
-in earnest today.
+**Element Synapse - Matrix homeserver implementation**
 
-Briefly, Matrix is an open standard for communications on the internet, supporting
-federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
-Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
-<https://spec.matrix.org/>`_ describes the technical details.
+|support| |development| |documentation| |license| |pypi| |python|
+
+Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
+implementation, written and maintained by `Element <https://element.io>`_.
+`Matrix <https://github.com/matrix-org>`__ is the open standard for
+secure and interoperable real time communications. You can directly run
+and manage the source code in this repository, available under an AGPL
+license. There is no support provided from Element unless you have a
+subscription.
+
+Subscription alternative
+========================
+
+Alternatively, for those that need an enterprise-ready solution, Element
+Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
+ESS builds on Synapse to offer a complete Matrix-based backend including the full
+`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
+giving admins the power to easily manage an organization-wide
+deployment. It includes advanced identity management, auditing,
+moderation and data retention options as well as Long Term Support and
+SLAs. ESS can be used to support any Matrix-based frontend client.
 
 .. contents::
 
-Installing and configuration
-============================
+🛠️ Installing and configuration
+===============================
 
 The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
 `Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
@@ -105,8 +118,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
 impact to other applications will be minimal.
 
 
-Testing a new installation
-==========================
+🧪 Testing a new installation
+=============================
 
 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.
@@ -159,8 +172,20 @@ the form of::
 As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.
 
-Troubleshooting and support
-===========================
+🎯 Troubleshooting and support
+==============================
+
+🚀 Professional support
+-----------------------
+
+Enterprise quality support for Synapse including SLAs is available as part of an
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
+
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.
+
+🤝 Community support
+--------------------
 
 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see
@@ -176,8 +201,8 @@ issues for support requests, only for bug reports and feature requests.
 .. |docs| replace:: ``docs``
 .. _docs: docs
 
-Identity Servers
-================
+🪪 Identity Servers
+===================
 
 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
@@ -206,8 +231,8 @@ an email address with your account, or send an invite to another user via their
 email address.
 
 
-Development
-===========
+🛠️ Development
+==============
 
 We welcome contributions to Synapse from the community!
 The best place to get started is our
@@ -225,8 +250,8 @@ Alongside all that, join our developer community on Matrix:
 `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
 
 
-.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
-    :alt: (get support on #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
+    :alt: (get community support in #synapse:matrix.org)
     :target: https://matrix.to/#/#synapse:matrix.org
 
 .. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
Deleted files (names not captured):

@@ -1 +0,0 @@
-Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details.

@@ -1 +0,0 @@
-Update OIDC documentation: by default Matrix doesn't query userinfo endpoint, then claims should be put on id_token.

@@ -1 +0,0 @@
-Reduce work of calculating outbound device lists updates.

@@ -1 +0,0 @@
-Improve performance of calculating device lists changes in `/sync`.

@@ -1 +0,0 @@
-Support loading pluggable modules from Docker images.
changelog.d/17318.misc (new file)
@@ -0,0 +1 @@
+Make the release script create a release branch for Complement as well.

changelog.d/17320.feature (new file)
@@ -0,0 +1 @@
+Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17337.feature (new file)
@@ -0,0 +1 @@
+Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17342.feature (new file)
@@ -0,0 +1 @@
+Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17356.doc (new file)
@@ -0,0 +1 @@
+Clarify `url_preview_url_blacklist` is a usability feature.

changelog.d/17362.bugfix (new file)
@@ -0,0 +1 @@
+Fix rare race which causes no new to-device messages to be received from remote server.

changelog.d/17363.misc (new file)
@@ -0,0 +1 @@
+Fix uploading packages to PyPi.

changelog.d/17365.feature (new file)
@@ -0,0 +1 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint.

changelog.d/17367.misc (new file)
@@ -0,0 +1 @@
+Add CI check for the README.

changelog.d/17379.doc (new file)
@@ -0,0 +1 @@
+Fix broken links in README.

changelog.d/17381.misc (new file)
@@ -0,0 +1 @@
+Fix linting errors from new `ruff` version.

changelog.d/17390.misc (new file)
@@ -0,0 +1 @@
+Fix building debian packages on non-clean checkouts.

changelog.d/17392.misc (new file)
@@ -0,0 +1 @@
+Finish up work to allow per-user feature flags.

changelog.d/17393.misc (new file)
@@ -0,0 +1 @@
+Allow enabling sliding sync per-user.

changelog.d/17399.doc (new file)
@@ -0,0 +1 @@
+Clarify that changelog content *and file extension* need to match in order for entries to merge.

changelog.d/17400.feature (new file)
@@ -0,0 +1 @@
+Forget all of a user's rooms upon deactivation, enabling future purges.

changelog.d/17403.feature (new file)
@@ -0,0 +1 @@
+Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/).
debian/changelog (vendored, 55 changes)

@@ -1,3 +1,58 @@
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
+matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 18:14:48 +0200
+
+matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
+
+  * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
+  * New Synapse release 1.110.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 14:07:56 +0200
+
+matrix-synapse-py3 (1.109.0) stable; urgency=medium
+
+  * New synapse release 1.109.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 09:45:15 +0000
+
+matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
+
+  * New synapse release 1.109.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Jun 2024 12:05:24 +0000
+
+matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.109.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2024 13:20:17 +0000
+
+matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.109.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jun 2024 09:42:46 +0100
+
 matrix-synapse-py3 (1.108.0) stable; urgency=medium
 
   * New Synapse release 1.108.0.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 28 May 2024 11:54:22 +0100
 
 matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.108.0rc1.
debian/register_new_matrix_user.ronn (vendored, 11 changes)

@@ -31,8 +31,12 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
     Local part of the new user. Will prompt if omitted.
 
   * `-p`, `--password`:
-    New password for user. Will prompt if omitted. Supplying the password
-    on the command line is not recommended. Use the STDIN instead.
+    New password for user. Will prompt if this option and `--password-file` are omitted.
+    Supplying the password on the command line is not recommended.
+
+  * `--password-file`:
+    File containing the new password for user. If set, overrides `--password`.
+    This is a more secure alternative to specifying the password on the command line.
 
   * `-a`, `--admin`:
     Register new user as an admin. Will prompt if omitted.
@@ -44,6 +48,9 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
     Shared secret as defined in server config file. This is an optional
     parameter as it can be also supplied via the YAML file.
 
+  * `--exists-ok`:
+    Do not fail if the user already exists. The user account will be not updated in this case.
+
   * `server_url`:
     URL of the home server. Defaults to 'https://localhost:8448'.
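A rough usage sketch combining the two new flags; the password-file and config paths here are hypothetical, not taken from the diff:

```
register_new_matrix_user -u alice -a --exists-ok \
    --password-file /run/secrets/alice_password \
    -c /etc/matrix-synapse/homeserver.yaml https://localhost:8448
```

Because `--exists-ok` makes the command succeed without modifying an existing account, the same invocation can be re-run safely from provisioning scripts.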
@@ -73,6 +73,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     curl \
     debhelper \
     devscripts \
+    # Required for building cffi from source.
+    libffi-dev \
     libsystemd-dev \
     lsb-release \
     pkg-config \
@@ -243,26 +243,3 @@ healthcheck:
 Jemalloc is embedded in the image and will be used instead of the default allocator.
 You can read about jemalloc by reading the Synapse
 [Admin FAQ](https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
-
-
-## Modules
-
-Synapse supports loading additional modules, using the
-[`modules`](https://element-hq.github.io/synapse/latest/modules/index.html)
-config. Synapse will look for these modules `/modules`.
-
-To install a package, simply run:
-
-```
-pip install --target <module_directory> <package>
-```
-
-Where `<module_directory>` is the directory mounted to `/modules`, and
-`<package>` is either the package name or a path to the package. See
-`pip install` for more details.
-
-**Note**: Packages already installed as part of Synapse cannot be overridden by
-different versions of the package in `/modules`, e.g. if the Synapse version
-uses Twisted 24.3.0 then installing Twisted 23.10.0 in `/modules` won't have any
-effect. This can cause issues if the required version of a package is different
-between Synapse and the module being installed.
@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
 cp -aT /synapse/source /synapse/build
 cd /synapse/build
 
+# Delete any existing `.so` files to ensure a clean build.
+rm -f /synapse/build/synapse/*.so
+
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the
@@ -105,8 +105,6 @@ experimental_features:
   # Expose a room summary for public rooms
   msc3266_enabled: true
 
-  msc4115_membership_on_events: true
-
 server_notices:
   system_mxid_localpart: _server
   system_mxid_display_name: "Server Alert"
@@ -176,7 +176,6 @@ app_service_config_files:
 {% endif %}
 
 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
-expire_access_token: False
 
 ## Signing Keys ##
 
@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
             "^/_matrix/media/",
             "^/_synapse/admin/v1/purge_media_cache$",
@@ -125,6 +125,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/user/.*/media.*$",
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
+            "^/_matrix/client/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
@@ -269,15 +269,6 @@ running with 'migrate_config'. See the README for more details.
 
     args += ["--config-path", config_path]
 
-    # Add the `/modules` directly to python search path, which allows users to
-    # add custom modules.
-    #
-    # We want to add the directory *last* so that nothing can overwrite the
-    # existing package versions. Therefore we load the current path and append
-    # `/modules` to that
-    path = ":".join(sys.path)
-    environ["PYTHONPATH"] = f"{path}:/modules"
-
     log("Starting synapse with args " + " ".join(args))
 
     args = [sys.executable] + args
@@ -1,21 +1,17 @@
 # Experimental Features API
 
 This API allows a server administrator to enable or disable some experimental features on a per-user
-basis. The currently supported features are:
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
-presence state enabled
-- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
-for another client
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys.
-
+basis. The currently supported features are:
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
 
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
 
 ## Enabling/Disabling Features
 
-This API allows a server administrator to enable experimental features for a given user. The request must
+This API allows a server administrator to enable experimental features for a given user. The request must
 provide a body containing the user id and listing the features to enable/disable in the following format:
 ```json
 {
@@ -35,7 +31,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
 ```
 
 ## Listing Enabled Features
-
+
 To list which features are enabled/disabled for a given user send a request to the following API:
 
 ```
@@ -52,4 +48,4 @@ user like so:
     "msc3967": false
   }
 }
-```
+```
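A hedged illustration of the enabling request this section describes; the `features` body shape is an assumption inferred from the response example above, not confirmed by the captured diff:

```json
{
    "features": {
        "msc3881": true,
        "msc3575": false
    }
}
```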
@@ -36,6 +36,10 @@ The following query parameters are available:
   - the room's name,
   - the local part of the room's canonical alias, or
   - the complete (local and server part) room's id (case sensitive).
+* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
+  the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
+* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
+  the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.
 
 Defaults to no filtering.
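For instance, combining the two new flags (assuming the List Room API's usual `GET /_synapse/admin/v1/rooms` endpoint):

```
GET /_synapse/admin/v1/rooms?public_rooms=true&empty_rooms=false
```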
@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.
 
-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.
 
 ### How do I know what to call the changelog file before I create the PR?
@@ -242,12 +242,11 @@ host    all         all             ::1/128     ident
 
 ### Fixing incorrect `COLLATE` or `CTYPE`
 
-Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
-of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
-`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
-underneath the database, or if a different version of the locale is used on any
-replicas.
+Synapse will refuse to start when using a database with incorrect values of
+`COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
+`database` section of the config, is set to true. Using different locales can
+cause issues if the locale library is updated from underneath the database, or
+if a different version of the locale is used on any replicas.
 
 If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
 the correct locale parameter (as shown above). It is also possible to change the
@@ -256,13 +255,3 @@ however extreme care must be taken to avoid database corruption.
 
 Note that the above may fail with an error about duplicate rows if corruption
 has already occurred, and such duplicate rows will need to be manually removed.
-
-### Fixing inconsistent sequences error
-
-Synapse uses Postgres sequences to generate IDs for various tables. A sequence
-and associated table can get out of sync if, for example, Synapse has been
-downgraded and then upgraded again.
-
-To fix the issue shut down Synapse (including any and all workers) and run the
-SQL command included in the error message. Once done Synapse should start
-successfully.
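A minimal sketch of the `allow_unsafe_locale` escape hatch described above, placed in the `database` section of the homeserver config (the surrounding keys are illustrative):

```yaml
database:
  name: psycopg2
  # Accept a database whose COLLATE/CTYPE Synapse would otherwise reject.
  allow_unsafe_locale: true
  args:
    database: synapse
```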
@@ -117,6 +117,19 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).
 
+# Upgrading to v1.111.0
+
+## New worker endpoints for authenticated client media
+
+[Media repository workers](./workers.md#synapseappmedia_repository) handling
+Media APIs can now handle the following endpoint pattern:
+
+```
+^/_matrix/client/v1/media/.*$
+```
+
+Please update your reverse proxy configuration.
+
 # Upgrading to v1.106.0
 
 ## Minimum supported Rust version
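For deployments that front a media worker with nginx, the reverse-proxy update might look roughly like the following sketch; the worker address and port are assumptions, not taken from the diff:

```
# Hypothetical: route the new authenticated client media endpoints to the
# media repository worker, alongside the existing ^/_matrix/media/ routing.
location ~ ^/_matrix/client/v1/media/ {
    proxy_pass http://localhost:8085;
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host $host;
}
```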
@@ -1759,8 +1759,9 @@ rc_3pid_validation:
 ### `rc_invites`
 
 This option sets ratelimiting how often invites can be sent in a room or to a
-specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
+defaults to `per_second: 0.3`, `burst_count: 10`.
 
 Client requests that invite user(s) when [creating a
 room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
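Spelled out as YAML, the documented defaults above correspond to the following (so this block on its own changes nothing):

```yaml
rc_invites:
  per_room:
    per_second: 0.3
    burst_count: 10
  per_user:
    per_second: 0.003
    burst_count: 5
  per_issuer:
    per_second: 0.3
    burst_count: 10
```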
@@ -1946,6 +1947,24 @@ Example configuration:
 max_image_pixels: 35M
 ```
 ---
+### `remote_media_download_burst_count`
+
+Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.
+
+Example configuration:
+```yaml
+remote_media_download_burst_count: 200M
+```
+---
+### `remote_media_download_per_second`
+
+Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads - this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached it's capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`.
+
+Example configuration:
+```yaml
+remote_media_download_per_second: 40K
+```
+---
 ### `prevent_media_downloads_from`
 
 A list of domains to never download media from. Media from these
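The leaky-bucket behaviour those two options describe can be sketched in a few lines of Python; this illustrates the documented algorithm only and is not Synapse's actual implementation:

```python
import time

class LeakyBucket:
    """Toy model of the per-IP media-download ratelimiter described above."""

    def __init__(self, burst_count: float, per_second: float) -> None:
        self.capacity = burst_count   # bucket size in bytes (e.g. 500 MiB)
        self.drain_rate = per_second  # drain rate in bytes/s (e.g. 87 KiB/s)
        self.level = 0.0
        self.last = time.monotonic()

    def allow(self, nbytes: int) -> bool:
        now = time.monotonic()
        # Bytes drain out of the bucket continuously at `drain_rate` per second.
        self.level = max(0.0, self.level - (now - self.last) * self.drain_rate)
        self.last = now
        if self.level >= self.capacity:
            return False  # bucket full: deny until enough bytes have drained
        self.level += nbytes  # penalize the bucket by this download's size
        return True

# One bucket per requester IP, per the description above.
bucket = LeakyBucket(burst_count=500 * 1024 * 1024, per_second=87 * 1024)
print(bucket.allow(10 * 1024 * 1024))  # True while the bucket has headroom
```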
@@ -1957,9 +1976,9 @@ This will not prevent the listed domains from accessing media themselves.
 It simply prevents users on this server from downloading media originating
 from the listed servers.
 
-This will have no effect on media originating from the local server.
-This only affects media downloaded from other Matrix servers, to
-block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+This will have no effect on media originating from the local server. This only
+affects media downloaded from other Matrix servers, to control URL previews see
+[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+[`url_preview_url_blacklist`](#url_preview_url_blacklist).
 
 Defaults to an empty list (nothing blocked).
@@ -2111,12 +2131,14 @@ url_preview_ip_range_whitelist:
---
### `url_preview_url_blacklist`

Optional list of URL matches that the URL preview spider is
denied from accessing. You should use `url_preview_ip_range_blacklist`
in preference to this, otherwise someone could define a public DNS
entry that points to a private IP address and circumvent the blacklist.
This is more useful if you know there is an entire shape of URL that
you know that will never want synapse to try to spider.
Optional list of URL matches that the URL preview spider is denied from
accessing. This is a usability feature, not a security one. You should use
`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
define a public DNS entry that points to a private IP address and circumvent
the blacklist. Applications that perform redirects or serve different content
when detecting that Synapse is accessing them can also bypass the blacklist.
This is more useful if you know there is an entire shape of URL that you know
that you do not want Synapse to preview.

Each list entry is a dictionary of url component attributes as returned
by urlparse.urlsplit as applied to the absolute form of the URL. See

@@ -2700,7 +2722,7 @@ Example configuration:
session_lifetime: 24h
```
---
### `refresh_access_token_lifetime`
### `refreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.
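
An illustrative setting, using the same duration format as `session_lifetime` above (the value shown is an example, not necessarily the default):

```yaml
refreshable_access_token_lifetime: 5m
```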

@@ -3788,7 +3810,8 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
  empty responses are returned to all queries. Defaults to true.
* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
  If set to true, will return all users known to the homeserver matching the search query.
  If false, search results will only contain users
  visible in public rooms and users sharing a room with the requester.
  Defaults to false.
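
Putting the two sub-options together, a configuration that opts into homeserver-wide search would look like the following (illustrative):

```yaml
user_directory:
  enabled: true
  search_all_users: true
```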

@@ -4132,7 +4155,7 @@ By default, no room is excluded.
Example configuration:
```yaml
exclude_rooms_from_sync:
  - !foo:example.com
  - "!foo:example.com"
```
(Room IDs begin with `!`, which YAML would otherwise parse as a tag, so the value must be quoted.)

---

@@ -62,6 +62,6 @@ following documentation:

## Reporting a security vulnerability

If you've found a security issue in Synapse or any other Matrix.org Foundation
project, please report it to us in accordance with our [Security Disclosure
Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
If you've found a security issue in Synapse or any other Element project,
please report it to us in accordance with our [Security Disclosure
Policy](https://element.io/security/security-disclosure-policy). Thank you!

@@ -739,6 +739,7 @@ An example for a federation sender instance:
Handles the media repository. It can handle all endpoints starting with:

    /_matrix/media/
    /_matrix/client/v1/media/

... and the following regular expressions matching media-specific administration APIs:
mypy.ini
@@ -96,3 +96,6 @@ ignore_missing_imports = True
# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True

[mypy-multipart.*]
ignore_missing_imports = True

poetry.lock (generated)
File diff suppressed because it is too large
@@ -43,6 +43,7 @@ target-version = ['py38', 'py39', 'py310', 'py311']
[tool.ruff]
line-length = 88

[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
# for error codes. The ones we ignore are:
# E501: Line too long (black enforces this for us)
@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.108.0rc1"
version = "1.110.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -200,10 +201,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We use `Self`, which was added in `typing-extensions` 4.0.
typing-extensions = ">=4.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -226,6 +225,8 @@ pydantic = ">=1.7.4, <3"
# needed.
setuptools_rust = ">=1.3"

# This is used for parsing multipart responses
python-multipart = ">=0.0.9"

# Optional Dependencies
# ---------------------
@@ -321,7 +322,7 @@ all = [
# This helps prevent merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.3.7"
ruff = "0.5.0"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

@@ -204,6 +204,8 @@ pub struct EventInternalMetadata {
    /// The stream ordering of this event. None, until it has been persisted.
    #[pyo3(get, set)]
    stream_ordering: Option<NonZeroI64>,
    #[pyo3(get, set)]
    instance_name: Option<String>,

    /// whether this event is an outlier (ie, whether we have the state at that
    /// point in the DAG)
@@ -232,6 +234,7 @@ impl EventInternalMetadata {
        Ok(EventInternalMetadata {
            data,
            stream_ordering: None,
            instance_name: None,
            outlier: false,
        })
    }

@@ -223,7 +223,6 @@ test_packages=(
    ./tests/msc3930
    ./tests/msc3902
    ./tests/msc3967
    ./tests/msc4115
)

# Enable dirty runs, so tests will reuse the same container where possible.

@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"

# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
ruff --quiet --fix "${files[@]}"
ruff check --quiet --fix "${files[@]}"

# Catch any common programming mistakes in Rust code.
#
@@ -70,6 +70,7 @@ def cli() -> None:
        pip install -e .[dev]

    - A checkout of the sytest repository at ../sytest
    - A checkout of the complement repository at ../complement

    Then to use:

@@ -112,10 +113,12 @@ def _prepare() -> None:
    # Make sure we're in a git repo.
    synapse_repo = get_repo_and_check_clean_checkout()
    sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")

    click.secho("Updating Synapse and Sytest git repos...")
    synapse_repo.remote().fetch()
    sytest_repo.remote().fetch()
    complement_repo.remote().fetch()

    # Get the current version and AST from root Synapse module.
    current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
        "Which branch should the release be based on?", default=default
    )

    for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
    for repo_name, repo in {
        "synapse": synapse_repo,
        "sytest": sytest_repo,
        "complement": complement_repo,
    }.items():
        # Special case for Complement: `develop` maps to `main`
        if repo_name == "complement" and branch_name == "develop":
            branch_name = "main"

        base_branch = find_ref(repo, branch_name)
        if not base_branch:
            print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
    if click.confirm("Push new SyTest branch?", default=True):
        sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

    # Same for Complement
    if click.confirm("Push new Complement branch?", default=True):
        complement_repo.git.push(
            "-u", complement_repo.remote().name, release_branch_name
        )

    # Switch to the release branch and ensure it's up to date.
    synapse_repo.git.checkout(release_branch_name)
    update_branch(synapse_repo)
@@ -630,6 +647,9 @@ def _merge_back() -> None:
    else:
        # Full release
        sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
        complement_repo = get_repo_and_check_clean_checkout(
            "../complement", "complement"
        )

    if click.confirm(f"Merge {branch_name} → master?", default=True):
        _merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +663,9 @@ def _merge_back() -> None:
    if click.confirm("On SyTest, merge master → develop?", default=True):
        _merge_into(sytest_repo, "master", "develop")

    if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
        _merge_into(complement_repo, branch_name, "main")


@cli.command()
def announce() -> None:
@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore
    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
        super().__init__(config.server.server_name, config=config)
@@ -52,6 +52,7 @@ def request_registration(
    user_type: Optional[str] = None,
    _print: Callable[[str], None] = print,
    exit: Callable[[int], None] = sys.exit,
    exists_ok: bool = False,
) -> None:
    url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

@@ -97,6 +98,10 @@ def request_registration(
    r = requests.post(url, json=data)

    if r.status_code != 200:
        response = r.json()
        if exists_ok and response["errcode"] == "M_USER_IN_USE":
            _print("User already exists. Skipping.")
            return
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
@@ -115,6 +120,7 @@ def register_new_user(
    shared_secret: str,
    admin: Optional[bool],
    user_type: Optional[str],
    exists_ok: bool = False,
) -> None:
    if not user:
        try:
@@ -154,7 +160,13 @@ def register_new_user(
        admin = False

    request_registration(
        user, password, server_location, shared_secret, bool(admin), user_type
        user,
        password,
        server_location,
        shared_secret,
        bool(admin),
        user_type,
        exists_ok=exists_ok,
    )


@@ -174,10 +186,22 @@ def main() -> None:
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "--exists-ok",
        action="store_true",
        help="Do not fail if user already exists.",
    )
    password_group = parser.add_mutually_exclusive_group()
    password_group.add_argument(
        "-p",
        "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
        help="New password for user. Will prompt for a password if "
        "this flag and `--password-file` are both omitted.",
    )
    password_group.add_argument(
        "--password-file",
        default=None,
        help="File containing the new password for user. If set, will override `--password`.",
    )
    parser.add_argument(
        "-t",
@@ -185,6 +209,7 @@ def main() -> None:
        default=None,
        help="User type as specified in synapse.api.constants.UserTypes",
    )

    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",
@@ -247,6 +272,11 @@ def main() -> None:
        print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
        sys.exit(1)

    if args.password_file:
        password = _read_file(args.password_file, "password-file").strip()
    else:
        password = args.password

    if args.server_url:
        server_url = args.server_url
    elif config is not None:
@@ -270,7 +300,13 @@ def main() -> None:
        admin = args.admin

    register_new_user(
        args.user, args.password, server_url, secret, admin, args.user_type
        args.user,
        password,
        server_url,
        secret,
        admin,
        args.user_type,
        exists_ok=args.exists_ok,
    )

@@ -777,22 +777,74 @@ class Porter:
        await self._setup_events_stream_seqs()
        await self._setup_sequence(
            "un_partial_stated_event_stream_sequence",
            ("un_partial_stated_event_stream",),
            [("un_partial_stated_event_stream", "stream_id")],
        )
        await self._setup_sequence(
            "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
            "device_inbox_sequence",
            [
                ("device_inbox", "stream_id"),
                ("device_federation_outbox", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "account_data_sequence",
            ("room_account_data", "room_tags_revisions", "account_data"),
            [
                ("room_account_data", "stream_id"),
                ("room_tags_revisions", "stream_id"),
                ("account_data", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "receipts_sequence",
            [
                ("receipts_linearized", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "presence_stream_sequence",
            [
                ("presence_stream", "stream_id"),
            ],
        )
        await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
        await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
        await self._setup_auth_chain_sequence()
        await self._setup_sequence(
            "application_services_txn_id_seq",
            ("application_services_txns",),
            "txn_id",
            [
                (
                    "application_services_txns",
                    "txn_id",
                )
            ],
        )
        await self._setup_sequence(
            "device_lists_sequence",
            [
                ("device_lists_stream", "stream_id"),
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
                ("device_lists_changes_in_room", "stream_id"),
                ("device_lists_remote_pending", "stream_id"),
                ("device_lists_changes_converted_stream_position", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "e2e_cross_signing_keys_sequence",
            [
                ("e2e_cross_signing_keys", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "push_rules_stream_sequence",
            [
                ("push_rules_stream", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "pushers_sequence",
            [
                ("pushers", "id"),
                ("deleted_pushers", "stream_id"),
            ],
        )

        # Step 3. Get tables.
@@ -1101,12 +1153,11 @@ class Porter:
    async def _setup_sequence(
        self,
        sequence_name: str,
        stream_id_tables: Iterable[str],
        column_name: str = "stream_id",
        stream_id_tables: Iterable[Tuple[str, str]],
    ) -> None:
        """Set a sequence to the correct value."""
        current_stream_ids = []
        for stream_id_table in stream_id_tables:
        for stream_id_table, column_name in stream_id_tables:
            max_stream_id = cast(
                int,
                await self.sqlite_store.db_pool.simple_select_one_onecol(

@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore [assignment]
    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig):
        super().__init__(

@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
from typing import Optional, Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from typing_extensions import Protocol

@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"

@@ -87,6 +90,19 @@ class Auth(Protocol):
            AuthError if access is denied for the user in the access token
        """

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        """Like `get_user_by_req`, except also checks if the user has access to
        the experimental feature. If they don't, returns a 404 unrecognized
        request.
        """

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ) -> None:

@@ -28,6 +28,7 @@ from synapse.api.errors import (
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
from .base import BaseAuth

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                    parent_span.set_tag("appservice_id", requester.app_service.id)
        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    @cancellable
    async def _wrapped_get_user_by_req(
        self,

@@ -40,6 +40,7 @@ from synapse.api.errors import (
    OAuthInsufficientScopeError,
    StoreError,
    SynapseError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
@@ -48,6 +49,7 @@ from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -245,6 +247,32 @@ class MSC3861DelegatedAuth(BaseAuth):

        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    async def get_user_by_access_token(
        self,
        token: str,

@@ -50,7 +50,7 @@ class Membership:
    KNOCK: Final = "knock"
    LEAVE: Final = "leave"
    BAN: Final = "ban"
    LIST: Final = (INVITE, JOIN, KNOCK, LEAVE, BAN)
    LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}


class PresenceState:
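
Switching `LIST` from a tuple to a set makes `in` checks constant-time and lets callers pass the constant wherever a collection of memberships is expected (as the admin handler hunk later in this diff does with `membership_list=Membership.LIST`). A standalone sketch of the idea, not the real class:

```python
from typing import Final


class Membership:
    INVITE: Final = "invite"
    JOIN: Final = "join"
    # A set gives O(1) membership tests; a tuple would scan linearly.
    LIST: Final = {INVITE, JOIN}


assert "join" in Membership.LIST
```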

@@ -238,7 +238,7 @@ class EventUnsignedContentFields:
    """Fields found inside the 'unsigned' data on events"""

    # Requesting user's membership, per MSC4115
    MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
    MEMBERSHIP: Final = "membership"


class RoomTypes:

@@ -130,7 +130,8 @@ class Ratelimiter:
                Overrides the value set during instantiation if set.
            burst_count: How many actions can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            update: Whether to count this check as performing the action. If the action
                cannot be performed, the user's action count is not incremented at all.
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.

@@ -681,17 +681,17 @@ def setup_sentry(hs: "HomeServer") -> None:
    )

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server.server_name)
    global_scope = sentry_sdk.Scope.get_global_scope()
    global_scope.set_tag("matrix_server_name", hs.config.server.server_name)

        app = (
            hs.config.worker.worker_app
            if hs.config.worker.worker_app
            else "synapse.app.homeserver"
        )
        name = hs.get_instance_name()
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)
    app = (
        hs.config.worker.worker_app
        if hs.config.worker.worker_app
        else "synapse.app.homeserver"
    )
    name = hs.get_instance_name()
    global_scope.set_tag("worker_app", app)
    global_scope.set_tag("worker_name", name)


def setup_sdnotify(hs: "HomeServer") -> None:

@@ -110,7 +110,7 @@ class AdminCmdStore(


class AdminCmdServer(HomeServer):
    DATASTORE_CLASS = AdminCmdStore  # type: ignore
    DATASTORE_CLASS = AdminCmdStore


async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:

@@ -163,7 +163,7 @@ class GenericWorkerStore(


class GenericWorkerServer(HomeServer):
    DATASTORE_CLASS = GenericWorkerStore  # type: ignore
    DATASTORE_CLASS = GenericWorkerStore

    def _listen_http(self, listener_config: ListenerConfig) -> None:
        assert listener_config.http_options is not None

@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:


class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore
    DATASTORE_CLASS = DataStore

    def _listener_http(
        self,

@@ -332,6 +332,9 @@ class ExperimentalConfig(Config):
        # MSC3391: Removing account data.
        self.msc3391_enabled = experimental.get("msc3391_enabled", False)

        # MSC3575 (Sliding Sync API endpoints)
        self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)

        # MSC3773: Thread notifications
        self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
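
These flags are read from the `experimental_features` section of the homeserver config, so enabling the sliding sync endpoints would presumably look like this (illustrative):

```yaml
experimental_features:
  msc3575_enabled: true
```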

@@ -390,9 +393,6 @@ class ExperimentalConfig(Config):
        # MSC3391: Removing account data.
        self.msc3391_enabled = experimental.get("msc3391_enabled", False)

        # MSC3967: Do not require UIA when first uploading cross signing keys
        self.msc3967_enabled = experimental.get("msc3967_enabled", False)

        # MSC3861: Matrix architecture change to delegate authentication via OIDC
        try:
            self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -433,6 +433,16 @@ class ExperimentalConfig(Config):
            ("experimental", "msc4108_delegation_endpoint"),
        )

        self.msc4115_membership_on_events = experimental.get(
            "msc4115_membership_on_events", False
        self.msc3823_account_suspension = experimental.get(
            "msc3823_account_suspension", False
        )

        self.msc3916_authenticated_media_enabled = experimental.get(
            "msc3916_authenticated_media_enabled", False
        )

        # MSC4151: Report room API (Client-Server API)
        self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)

        # MSC4156: Migrate server_name to via
        self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)

@@ -218,3 +218,13 @@ class RatelimitConfig(Config):
            "rc_media_create",
            defaults={"per_second": 10, "burst_count": 50},
        )

        self.remote_media_downloads = RatelimitSettings(
            key="rc_remote_media_downloads",
            per_second=self.parse_size(
                config.get("remote_media_download_per_second", "87K")
            ),
            burst_count=self.parse_size(
                config.get("remote_media_download_burst_count", "500M")
            ),
        )
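
The byte thresholds pass through `parse_size`, so the string defaults above become binary-suffixed byte counts. A rough sketch of the assumed parsing semantics (not the exact Synapse helper):

```python
def parse_size(value: str) -> int:
    """Parse "87K" / "500M" style size strings into bytes (binary suffixes assumed)."""
    suffixes = {"K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
    if value and value[-1].upper() in suffixes:
        return int(float(value[:-1]) * suffixes[value[-1].upper()])
    return int(value)


assert parse_size("87K") == 89088  # 87 * 1024
```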

@@ -90,6 +90,7 @@ def prune_event(event: EventBase) -> EventBase:
    pruned_event.internal_metadata.stream_ordering = (
        event.internal_metadata.stream_ordering
    )
    pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
    pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

    # Mark the event as redacted
@@ -116,6 +117,7 @@ def clone_event(event: EventBase) -> EventBase:
    new_event.internal_metadata.stream_ordering = (
        event.internal_metadata.stream_ordering
    )
    new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
    new_event.internal_metadata.outlier = event.internal_metadata.outlier

    return new_event
@@ -834,3 +836,21 @@ def maybe_upsert_event_field(
            del container[key]

    return upsert_okay


def strip_event(event: EventBase) -> JsonDict:
    """
    Used for "stripped state" events which provide a simplified view of the state of a
    room intended to help a potential joiner identify the room (relevant when the user
    is invited or knocked).

    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
    properties present.
    """

    return {
        "type": event.type,
        "state_key": event.state_key,
        "content": event.content,
        "sender": event.sender,
    }

@@ -47,9 +47,9 @@ from synapse.events.utils import (
    validate_canonicaljson,
)
from synapse.http.servlet import validate_json_object
from synapse.rest.models import RequestBodyModel
from synapse.storage.controllers.state import server_acl_evaluator_from_event
from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID
from synapse.types.rest import RequestBodyModel


class EventValidator:

@@ -56,6 +56,7 @@ from synapse.api.errors import (
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
@@ -1870,6 +1871,52 @@ class FederationClient(FederationBase):

        return filtered_statuses, filtered_failures

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Union[
        Tuple[int, Dict[bytes, List[bytes]], bytes],
        Tuple[int, Dict[bytes, List[bytes]]],
    ]:
        try:
            return await self.transport_layer.federation_download_media(
                destination,
                media_id,
                output_stream=output_stream,
                max_size=max_size,
                max_timeout_ms=max_timeout_ms,
                download_ratelimiter=download_ratelimiter,
                ip_address=ip_address,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fallback to the _matrix/media/v3/download endpoint. Otherwise, consider it a legitimate error
            # and raise.
            if not is_unknown_endpoint(e):
                raise

        logger.debug(
            "Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
            destination,
            media_id,
        )

        return await self.transport_layer.download_media_v3(
            destination,
            media_id,
            output_stream=output_stream,
            max_size=max_size,
            max_timeout_ms=max_timeout_ms,
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

    async def download_media(
        self,
        destination: str,
@@ -1877,6 +1924,8 @@ class FederationClient(FederationBase):
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        try:
            return await self.transport_layer.download_media_v3(
@@ -1885,6 +1934,8 @@ class FederationClient(FederationBase):
                output_stream=output_stream,
                max_size=max_size,
                max_timeout_ms=max_timeout_ms,
                download_ratelimiter=download_ratelimiter,
                ip_address=ip_address,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
@@ -1905,6 +1956,8 @@ class FederationClient(FederationBase):
            output_stream=output_stream,
            max_size=max_size,
            max_timeout_ms=max_timeout_ms,
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

@@ -674,7 +674,7 @@ class FederationServer(FederationBase):
        # This is in addition to the HS-level rate limiting applied by
        # BaseFederationServlet.
        # type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
        await self._room_member_handler._join_rate_per_room_limiter.ratelimit(  # type: ignore[has-type]
        await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
            requester=None,
            key=room_id,
            update=False,
@@ -717,7 +717,7 @@ class FederationServer(FederationBase):
            SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
            caller_supports_partial_state,
        )
        await self._room_member_handler._join_rate_per_room_limiter.ratelimit(  # type: ignore[has-type]
        await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
            requester=None,
            key=room_id,
            update=False,

@@ -21,6 +21,7 @@
#
import datetime
import logging
from collections import OrderedDict
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type

@@ -68,6 +69,10 @@ sent_edus_by_type = Counter(
# If the retry interval is larger than this then we enter "catchup" mode
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000

# Limit how many presence states we add to each presence EDU, to ensure that
# they are bounded in size.
MAX_PRESENCE_STATES_PER_EDU = 50


class PerDestinationQueue:
    """
@@ -144,7 +149,7 @@ class PerDestinationQueue:

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence: Dict[str, UserPresenceState] = {}
        self._pending_presence: OrderedDict[str, UserPresenceState] = OrderedDict()

        # List of room_id -> receipt_type -> user_id -> receipt_dict,
        #
@@ -317,7 +322,6 @@ class PerDestinationQueue:
        )

    async def _transaction_transmission_loop(self) -> None:
        pending_pdus: List[EventBase] = []
        try:
            self.transmission_loop_running = True

@@ -333,7 +337,6 @@ class PerDestinationQueue:
                # not caught up yet
                return

            pending_pdus = []
            while True:
                self._new_data_to_send = False

@@ -399,7 +402,7 @@ class PerDestinationQueue:
            # through another mechanism, because this is all volatile!
            self._pending_edus = []
            self._pending_edus_keyed = {}
            self._pending_presence = {}
            self._pending_presence.clear()
            self._pending_receipt_edus = []

            self._start_catching_up()
@@ -721,22 +724,26 @@ class _TransactionQueueManager:

        # Add presence EDU.
        if self.queue._pending_presence:
            # Only send max 50 presence entries in the EDU, to bound the amount
            # of data we're sending.
            presence_to_add: List[JsonDict] = []
            while (
                self.queue._pending_presence
                and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU
            ):
                _, presence = self.queue._pending_presence.popitem(last=False)
                presence_to_add.append(
                    format_user_presence_state(presence, self.queue._clock.time_msec())
                )

            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type=EduTypes.PRESENCE,
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                    content={"push": presence_to_add},
                )
            )
            self.queue._pending_presence = {}

        # Add read receipt EDUs.
        pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
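
The switch to an `OrderedDict` is what makes the capped draining above well-behaved: `popitem(last=False)` removes entries FIFO, so the oldest pending presence is sent first and anything beyond the cap stays queued for the next transaction. A tiny standalone illustration:

```python
from collections import OrderedDict

pending = OrderedDict(alice="online", bob="offline", carol="unavailable")
batch = []
MAX_PER_EDU = 2
while pending and len(batch) < MAX_PER_EDU:
    _, state = pending.popitem(last=False)  # FIFO: oldest entry first
    batch.append(state)

assert batch == ["online", "offline"]
assert list(pending) == ["carol"]  # left over for the next EDU
```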

@@ -43,6 +43,7 @@ import ijson

from synapse.api.constants import Direction, Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
    FEDERATION_UNSTABLE_PREFIX,
@@ -819,9 +820,10 @@ class TransportLayerClient:
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/r0/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,
@@ -834,6 +836,8 @@ class TransportLayerClient:
                "allow_remote": "false",
                "timeout_ms": str(max_timeout_ms),
            },
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

    async def download_media_v3(
@@ -843,9 +847,10 @@ class TransportLayerClient:
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/v3/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,
@@ -862,6 +867,31 @@ class TransportLayerClient:
                "allow_redirect": "true",
            },
            follow_redirects=True,
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
        path = f"/_matrix/federation/v1/media/download/{media_id}"
        return await self.client.federation_get_file(
            destination,
            path,
            output_stream=output_stream,
            max_size=max_size,
            args={
                "timeout_ms": str(max_timeout_ms),
            },
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )

@@ -32,6 +32,7 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
    FEDERATION_SERVLET_CLASSES,
    FederationAccountStatusServlet,
    FederationMediaDownloadServlet,
    FederationUnstableClientKeysClaimServlet,
)
from synapse.http.server import HttpServer, JsonResource
@@ -315,6 +316,10 @@ def register_servlets(
            ):
                continue

            if servletclass == FederationMediaDownloadServlet:
                if not hs.config.server.enable_media_repo:
                    continue

            servletclass(
                hs=hs,
                authenticator=authenticator,

@@ -360,13 +360,29 @@ class BaseFederationServlet:
                        "request"
                    )
                    return None
                if (
                    func.__self__.__class__.__name__  # type: ignore
                    == "FederationMediaDownloadServlet"
                ):
                    response = await func(
                        origin, content, request, *args, **kwargs
                    )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )
            else:
                if (
                    func.__self__.__class__.__name__  # type: ignore
                    == "FederationMediaDownloadServlet"
                ):
                    response = await func(
                        origin, content, request, *args, **kwargs
                    )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )
            else:
                response = await func(
                    origin, content, request.args, *args, **kwargs
                )
        finally:
            # if we used the origin's context as the parent, add a new span using
            # the servlet span as a parent, so that we have a link

@@ -44,10 +44,13 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer,
    parse_integer_from_args,
    parse_string_from_args,
    parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -787,6 +790,42 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
        return 200, {"account_statuses": statuses, "failures": failures}


class FederationMediaDownloadServlet(BaseFederationServerServlet):
    """
    Implementation of the new federation media `/download` endpoint outlined in MSC3916. Returns
    a multipart/mixed response consisting of a JSON object and the requested media
    item. This endpoint only returns local media.
    """

    PATH = "/media/download/(?P<media_id>[^/]*)"
    RATELIMIT = True

    def __init__(
        self,
        hs: "HomeServer",
        ratelimiter: FederationRateLimiter,
        authenticator: Authenticator,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.media_repo = self.hs.get_media_repository()

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        request: SynapseRequest,
        media_id: str,
    ) -> None:
        max_timeout_ms = parse_integer(
            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
        )
        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
        await self.media_repo.get_local_media(
            request, media_id, None, max_timeout_ms, federation=True
        )


FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationSendServlet,
    FederationEventServlet,
@@ -818,4 +857,5 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationV1SendKnockServlet,
    FederationMakeKnockServlet,
    FederationAccountStatusServlet,
    FederationMediaDownloadServlet,
)

@@ -42,7 +42,6 @@ class AdminHandler:
        self._device_handler = hs.get_device_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._state_storage_controller = self._storage_controllers.state
        self._hs_config = hs.config
        self._msc3866_enabled = hs.config.experimental.msc3866.enabled

    async def get_whois(self, user: UserID) -> JsonMapping:
@@ -126,13 +125,7 @@ class AdminHandler:
        # Get all rooms the user is in or has been in
        rooms = await self._store.get_rooms_for_local_user_where_membership_is(
            user_id,
            membership_list=(
                Membership.JOIN,
                Membership.LEAVE,
                Membership.BAN,
                Membership.INVITE,
                Membership.KNOCK,
            ),
            membership_list=Membership.LIST,
        )

        # We only try and fetch events for rooms the user has been in. If
@@ -179,7 +172,7 @@ class AdminHandler:
            if room.membership == Membership.JOIN:
                stream_ordering = self._store.get_room_max_stream_ordering()
            else:
                stream_ordering = room.stream_ordering
                stream_ordering = room.event_pos.stream

            from_key = RoomStreamToken(topological=0, stream=0)
            to_key = RoomStreamToken(stream=stream_ordering)
@@ -221,7 +214,6 @@ class AdminHandler:
                self._storage_controllers,
                user_id,
                events,
                msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
            )

            writer.write_events(room_id, events)

@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
                    ratelimit=False,
                    require_consent=False,
                )

                # Mark the room forgotten too, because they won't be able to do this
                # for us. This may lead to the room being purged eventually.
                await self._room_member_handler.forget(user, room_id)
            except Exception:
                logger.exception(
                    "Failed to part user %r from room %r: ignoring and continuing",

@@ -236,6 +236,13 @@ class DeviceMessageHandler:
        local_messages = {}
        remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
        for user_id, by_device in messages.items():
            if not UserID.is_valid(user_id):
                logger.warning(
                    "Ignoring attempt to send device message to invalid user: %r",
                    user_id,
                )
                continue

            # add an opentracing log entry for each message
            for device_id, message_content in by_device.items():
                log_kv(

@@ -35,6 +35,7 @@ from synapse.api.errors import CodeMessageException, Codes, NotFoundError, Synap
from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.types import (
    JsonDict,
    JsonMapping,
@@ -45,7 +46,10 @@ from synapse.types import (
from synapse.util import json_decoder
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.cancellation import cancellable
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.retryutils import (
    NotRetryingDestination,
    filter_destinations_by_retry_limiter,
)

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -53,6 +57,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"


class E2eKeysHandler:
    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
@@ -62,6 +69,7 @@ class E2eKeysHandler:
        self._appservice_handler = hs.get_application_service_handler()
        self.is_mine = hs.is_mine
        self.clock = hs.get_clock()
        self._worker_lock_handler = hs.get_worker_locks_handler()

        federation_registry = hs.get_federation_registry()

@@ -82,6 +90,12 @@ class E2eKeysHandler:
                edu_updater.incoming_signing_key_update,
            )

            self.device_key_uploader = self.upload_device_keys_for_user
        else:
            self.device_key_uploader = (
                ReplicationUploadKeysForUserRestServlet.make_client(hs)
            )

        # doesn't really work as part of the generic query API, because the
        # query request requires an object POST, but we abuse the
        # "query handler" interface.
@@ -145,6 +159,11 @@ class E2eKeysHandler:
        remote_queries = {}

        for user_id, device_ids in device_keys_query.items():
            if not UserID.is_valid(user_id):
                # Ignore invalid user IDs, which is the same behaviour as if
                # the user existed but had no keys.
                continue

            # we use UserID.from_string to catch invalid user ids
            if self.is_mine(UserID.from_string(user_id)):
                local_query[user_id] = device_ids
@@ -259,10 +278,8 @@ class E2eKeysHandler:
            "%d destinations to query devices for", len(remote_queries_not_in_cache)
        )

        async def _query(
            destination_queries: Tuple[str, Dict[str, Iterable[str]]]
        ) -> None:
            destination, queries = destination_queries
        async def _query(destination: str) -> None:
            queries = remote_queries_not_in_cache[destination]
            return await self._query_devices_for_destination(
                results,
                cross_signing_keys,
@@ -272,9 +289,20 @@ class E2eKeysHandler:
                timeout,
            )

        # Only try and fetch keys for destinations that are not marked as
        # down.
        filtered_destinations = await filter_destinations_by_retry_limiter(
            remote_queries_not_in_cache.keys(),
            self.clock,
            self.store,
            # Let's give an arbitrary grace period for those hosts that are
            # only recently down
            retry_due_within_ms=60 * 1000,
        )

        await concurrently_execute(
            _query,
            remote_queries_not_in_cache.items(),
            filtered_destinations,
            10,
            delay_cancellation=True,
        )

@@ -775,36 +803,17 @@ class E2eKeysHandler:
            "one_time_keys": A mapping from algorithm to number of keys for that
                algorithm, including those previously persisted.
        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id,
                user_id,
                time_now,
            await self.device_key_uploader(
                user_id=user_id,
                device_id=device_id,
                keys={"device_keys": device_keys},
            )
            log_kv(
                {
                    "message": "Updating device_keys for user.",
                    "user_id": user_id,
                    "device_id": device_id,
                }
            )
            # TODO: Sign the JSON with the server key
            changed = await self.store.set_e2e_device_keys(
                user_id, device_id, time_now, device_keys
            )
            if changed:
                # Only notify about device updates *if* the keys actually changed
                await self.device_handler.notify_device_update(user_id, [device_id])
        else:
            log_kv({"message": "Not updating device_keys for user", "user_id": user_id})

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
            log_kv(
@@ -840,6 +849,49 @@ class E2eKeysHandler:
                {"message": "Did not update fallback_keys", "reason": "no keys given"}
            )

        result = await self.store.count_e2e_one_time_keys(user_id, device_id)

        set_tag("one_time_key_counts", str(result))
        return {"one_time_key_counts": result}

    @tag_args
    async def upload_device_keys_for_user(
        self, user_id: str, device_id: str, keys: JsonDict
    ) -> None:
        """
        Args:
            user_id: user whose keys are being uploaded.
            device_id: device whose keys are being uploaded.
            keys: the `device_keys` of an /keys/upload request.

        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)

        time_now = self.clock.time_msec()

        device_keys = keys["device_keys"]
        logger.info(
            "Updating device_keys for device %r for user %s at %d",
            device_id,
            user_id,
            time_now,
        )
        log_kv(
            {
                "message": "Updating device_keys for user.",
                "user_id": user_id,
                "device_id": device_id,
            }
        )
        # TODO: Sign the JSON with the server key
        changed = await self.store.set_e2e_device_keys(
            user_id, device_id, time_now, device_keys
        )
        if changed:
            # Only notify about device updates *if* the keys actually changed
            await self.device_handler.notify_device_update(user_id, [device_id])

        # the device should have been registered already, but it may have been
        # deleted due to a race with a DELETE request. Or we may be using an
        # old access_token without an associated device_id. Either way, we

@@ -847,53 +899,56 @@ class E2eKeysHandler:
        # keys without a corresponding device.
        await self.device_handler.check_device_registered(user_id, device_id)

        result = await self.store.count_e2e_one_time_keys(user_id, device_id)

        set_tag("one_time_key_counts", str(result))
        return {"one_time_key_counts": result}

    async def _upload_one_time_keys_for_user(
        self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
    ) -> None:
        logger.info(
            "Adding one_time_keys %r for device %r for user %r at %d",
            one_time_keys.keys(),
            device_id,
            user_id,
            time_now,
        )
        # We take out a lock so that we don't have to worry about a client
        # sending duplicate requests.
        lock_key = f"{user_id}_{device_id}"
        async with self._worker_lock_handler.acquire_lock(
            ONE_TIME_KEY_UPLOAD, lock_key
        ):
            logger.info(
                "Adding one_time_keys %r for device %r for user %r at %d",
                one_time_keys.keys(),
                device_id,
                user_id,
                time_now,
            )

            # make a list of (alg, id, key) tuples
            key_list = []
            for key_id, key_obj in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((algorithm, key_id, key_obj))
            # make a list of (alg, id, key) tuples
            key_list = []
            for key_id, key_obj in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((algorithm, key_id, key_obj))

            # First we check if we have already persisted any of the keys.
            existing_key_map = await self.store.get_e2e_one_time_keys(
                user_id, device_id, [k_id for _, k_id, _ in key_list]
            )
            # First we check if we have already persisted any of the keys.
            existing_key_map = await self.store.get_e2e_one_time_keys(
                user_id, device_id, [k_id for _, k_id, _ in key_list]
            )

            new_keys = []  # Keys that we need to insert. (alg, id, json) tuples.
            for algorithm, key_id, key in key_list:
                ex_json = existing_key_map.get((algorithm, key_id), None)
                if ex_json:
                    if not _one_time_keys_match(ex_json, key):
                        raise SynapseError(
                            400,
                            (
                                "One time key %s:%s already exists. "
                                "Old key: %s; new key: %r"
            new_keys = []  # Keys that we need to insert. (alg, id, json) tuples.
            for algorithm, key_id, key in key_list:
                ex_json = existing_key_map.get((algorithm, key_id), None)
                if ex_json:
                    if not _one_time_keys_match(ex_json, key):
                        raise SynapseError(
                            400,
                            (
                                "One time key %s:%s already exists. "
                                "Old key: %s; new key: %r"
                            )
                            % (algorithm, key_id, ex_json, key),
                        )
                            % (algorithm, key_id, ex_json, key),
                else:
                    new_keys.append(
                        (algorithm, key_id, encode_canonical_json(key).decode("ascii"))
                    )
                else:
                    new_keys.append(
                        (algorithm, key_id, encode_canonical_json(key).decode("ascii"))
                    )

            log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
            await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
            log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
            await self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, new_keys
            )

    async def upload_signing_keys_for_user(
        self, user_id: str, keys: JsonDict

@@ -247,6 +247,12 @@ class E2eRoomKeysHandler:
                    if current_room_key:
                        if self._should_replace_room_key(current_room_key, room_key):
                            log_kv({"message": "Replacing room key."})
                            logger.debug(
                                "Replacing room key. room=%s session=%s user=%s",
                                room_id,
                                session_id,
                                user_id,
                            )
                            # updates are done one at a time in the DB, so send
                            # updates right away rather than batching them up,
                            # like we do with the inserts
@@ -256,6 +262,12 @@ class E2eRoomKeysHandler:
                            changed = True
                        else:
                            log_kv({"message": "Not replacing room_key."})
                            logger.debug(
                                "Not replacing room key. room=%s session=%s user=%s",
                                room_id,
                                session_id,
                                user_id,
                            )
                    else:
                        log_kv(
                            {
@@ -265,6 +277,12 @@ class E2eRoomKeysHandler:
                            }
                        )
                        log_kv({"message": "Replacing room key."})
                        logger.debug(
                            "Inserting new room key. room=%s session=%s user=%s",
                            room_id,
                            session_id,
                            user_id,
                        )
                        to_insert.append((room_id, session_id, room_key))
                        changed = True
||||
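Aside: `_should_replace_room_key`, whose outcome is being logged in these hunks, is not shown here. As a rough sketch of the usual key-backup replacement rule (verified keys win, then the key covering earlier messages, then the less-forwarded one; field names assumed from the room-key backup JSON):

def should_replace_room_key(current: dict, candidate: dict) -> bool:
    # Sketch of the "is the candidate backup key better?" decision; the
    # real Synapse implementation may order these checks differently.
    if current is None:
        return True
    if candidate["is_verified"] != current["is_verified"]:
        return candidate["is_verified"]  # verified keys always win
    if candidate["first_message_index"] != current["first_message_index"]:
        return candidate["first_message_index"] < current["first_message_index"]
    return candidate["forwarded_count"] < current["forwarded_count"]

# e.g. a verified key replaces an unverified one:
assert should_replace_room_key(
    {"is_verified": False, "first_message_index": 0, "forwarded_count": 0},
    {"is_verified": True, "first_message_index": 3, "forwarded_count": 1},
)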
@@ -148,7 +148,6 @@ class EventHandler:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastores().main
         self._storage_controllers = hs.get_storage_controllers()
-        self._config = hs.config

     async def get_event(
         self,
@@ -194,7 +193,6 @@ class EventHandler:
             user.to_string(),
             [event],
             is_peeking=is_peeking,
-            msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
         )

         if not filtered:
@@ -199,7 +199,7 @@ class InitialSyncHandler:
                 )
             elif event.membership == Membership.LEAVE:
                 room_end_token = RoomStreamToken(
-                    stream=event.stream_ordering,
+                    stream=event.event_pos.stream,
                 )
                 deferred_room_state = run_in_background(
                     self._state_storage_controller.get_state_for_events,
@@ -224,7 +224,6 @@ class InitialSyncHandler:
             self._storage_controllers,
             user_id,
             messages,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -383,7 +382,6 @@ class InitialSyncHandler:
             requester.user.to_string(),
             messages,
             is_peeking=is_peeking,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -498,7 +496,6 @@ class InitialSyncHandler:
             requester.user.to_string(),
             messages,
             is_peeking=is_peeking,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -201,7 +201,7 @@ class MessageHandler:

         if at_token:
             last_event_id = (
-                await self.store.get_last_event_in_room_before_stream_ordering(
+                await self.store.get_last_event_id_in_room_before_stream_ordering(
                     room_id,
                     end_token=at_token.room_key,
                 )
@@ -496,13 +496,6 @@ class EventCreationHandler:

         self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state

-        self.membership_types_to_include_profile_data_in = {
-            Membership.JOIN,
-            Membership.KNOCK,
-        }
-        if self.hs.config.server.include_profile_data_on_invite:
-            self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
-
         self.send_event = ReplicationSendEventRestServlet.make_client(hs)
         self.send_events = ReplicationSendEventsRestServlet.make_client(hs)

@@ -594,8 +587,6 @@ class EventCreationHandler:
         Creates an FrozenEvent object, filling out auth_events, prev_events,
         etc.

-        Adds display names to Join membership events.
-
         Args:
             requester
             event_dict: An entire event
@@ -651,6 +642,17 @@ class EventCreationHandler:
         """
         await self.auth_blocking.check_auth_blocking(requester=requester)

+        if event_dict["type"] == EventTypes.Message:
+            requester_suspended = await self.store.get_user_suspended_status(
+                requester.user.to_string()
+            )
+            if requester_suspended:
+                raise SynapseError(
+                    403,
+                    "Sending messages while account is suspended is not allowed.",
+                    Codes.USER_ACCOUNT_SUSPENDED,
+                )
+
         if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
             room_version_id = event_dict["content"]["room_version"]
             maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
@@ -672,29 +674,6 @@ class EventCreationHandler:

         self.validator.validate_builder(builder)

-        if builder.type == EventTypes.Member:
-            membership = builder.content.get("membership", None)
-            target = UserID.from_string(builder.state_key)
-
-            if membership in self.membership_types_to_include_profile_data_in:
-                # If event doesn't include a display name, add one.
-                profile = self.profile_handler
-                content = builder.content
-
-                try:
-                    if "displayname" not in content:
-                        displayname = await profile.get_displayname(target)
-                        if displayname is not None:
-                            content["displayname"] = displayname
-                    if "avatar_url" not in content:
-                        avatar_url = await profile.get_avatar_url(target)
-                        if avatar_url is not None:
-                            content["avatar_url"] = avatar_url
-                except Exception as e:
-                    logger.info(
-                        "Failed to get profile information for %r: %s", target, e
-                    )
-
         is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
         if require_consent and not is_exempt:
             await self.assert_accepted_privacy_policy(requester)

@@ -1583,6 +1562,7 @@ class EventCreationHandler:
             # stream_ordering entry manually (as it was persisted on
             # another worker).
             event.internal_metadata.stream_ordering = stream_id
+            event.internal_metadata.instance_name = writer_instance

             return event
@@ -27,7 +27,6 @@ from synapse.api.constants import Direction, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import SerializeEventConfig
-from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
 from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging.opentracing import trace
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -41,6 +40,7 @@ from synapse.types import (
     StreamKeyType,
     TaskStatus,
 )
+from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
 from synapse.types.state import StateFilter
 from synapse.util.async_helpers import ReadWriteLock
 from synapse.visibility import filter_events_for_client
@@ -623,7 +623,6 @@ class PaginationHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         # if after the filter applied there are no more events
@@ -590,7 +590,7 @@ class RegistrationHandler:
                 # moving away from bare excepts is a good thing to do.
                 logger.error("Failed to join new user to %r: %r", r, e)
             except Exception as e:
-                logger.error("Failed to join new user to %r: %r", r, e)
+                logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)

     async def _auto_join_rooms(self, user_id: str) -> None:
         """Automatically joins users to auto join rooms - creating the room in the first place
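Aside: the only change in this hunk is `exc_info=True`, which makes the logging module attach the full traceback rather than just the `%r` rendering of the exception. For example:

import logging

logging.basicConfig()
logger = logging.getLogger(__name__)

try:
    1 / 0
except Exception as e:
    # Without exc_info=True you only get "ZeroDivisionError(...)"; with it,
    # the complete traceback is appended to the log record.
    logger.error("Failed to join new user to %r: %r", "#room:hs", e, exc_info=True)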
@@ -95,7 +95,6 @@ class RelationsHandler:
         self._event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
         self._event_creation_handler = hs.get_event_creation_handler()
-        self._config = hs.config

     async def get_relations(
         self,
@@ -164,7 +163,6 @@ class RelationsHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
-            msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
         )

         # The relations returned for the requested event do include their
@@ -393,9 +391,9 @@ class RelationsHandler:

         # Attempt to find another event to use as the latest event.
         potential_events, _ = await self._main_store.get_relations_for_event(
-            room_id,
-            event_id,
+            event,
+            room_id,
             RelationTypes.THREAD,
             direction=Direction.FORWARDS,
         )
@@ -610,7 +608,6 @@ class RelationsHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
-            msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
         )

         aggregations = await self.get_bundled_aggregations(
@@ -40,7 +40,6 @@ from typing import (
 )

 import attr
-from typing_extensions import TypedDict

 import synapse.events.snapshot
 from synapse.api.constants import (
@@ -88,6 +87,7 @@ from synapse.types import (
     UserID,
     create_requester,
 )
+from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
 from synapse.types.state import StateFilter
 from synapse.util import stringutils
 from synapse.util.caches.response_cache import ResponseCache
@@ -1476,7 +1476,6 @@ class RoomContextHandler:
             user.to_string(),
             events,
             is_peeking=is_peeking,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         event = await self.store.get_event(
@@ -1780,63 +1779,6 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
         return self.store.get_current_room_stream_token_for_room_id(room_id)


-class ShutdownRoomParams(TypedDict):
-    """
-    Attributes:
-        requester_user_id:
-            User who requested the action. Will be recorded as putting the room on the
-            blocking list.
-        new_room_user_id:
-            If set, a new room will be created with this user ID
-            as the creator and admin, and all users in the old room will be
-            moved into that room. If not set, no new room will be created
-            and the users will just be removed from the old room.
-        new_room_name:
-            A string representing the name of the room that new users will
-            be invited to. Defaults to `Content Violation Notification`
-        message:
-            A string containing the first message that will be sent as
-            `new_room_user_id` in the new room. Ideally this will clearly
-            convey why the original room was shut down.
-            Defaults to `Sharing illegal content on this server is not
-            permitted and rooms in violation will be blocked.`
-        block:
-            If set to `true`, this room will be added to a blocking list,
-            preventing future attempts to join the room. Defaults to `false`.
-        purge:
-            If set to `true`, purge the given room from the database.
-        force_purge:
-            If set to `true`, the room will be purged from database
-            even if there are still users joined to the room.
-    """
-
-    requester_user_id: Optional[str]
-    new_room_user_id: Optional[str]
-    new_room_name: Optional[str]
-    message: Optional[str]
-    block: bool
-    purge: bool
-    force_purge: bool
-
-
-class ShutdownRoomResponse(TypedDict):
-    """
-    Attributes:
-        kicked_users: An array of users (`user_id`) that were kicked.
-        failed_to_kick_users:
-            An array of users (`user_id`) that that were not kicked.
-        local_aliases:
-            An array of strings representing the local aliases that were
-            migrated from the old room to the new.
-        new_room_id: A string representing the room ID of the new room.
-    """
-
-    kicked_users: List[str]
-    failed_to_kick_users: List[str]
-    local_aliases: List[str]
-    new_room_id: Optional[str]
-
-
 class RoomShutdownHandler:
     DEFAULT_MESSAGE = (
         "Sharing illegal content on this server is not permitted and rooms in"
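Aside: the two TypedDicts removed above now live in `synapse.types.handlers` (see the import hunks earlier), so call sites keep building them as plain dicts. An illustrative value, with field meanings taken from the removed docstring and the literal values invented:

from synapse.types.handlers import ShutdownRoomParams

params: ShutdownRoomParams = {
    "requester_user_id": "@admin:example.com",
    "new_room_user_id": "@abuse:example.com",  # if set, members are moved to a new room
    "new_room_name": None,  # None falls back to "Content Violation Notification"
    "message": None,  # None falls back to the DEFAULT_MESSAGE shown above
    "block": True,  # add the old room to the blocking list
    "purge": True,  # delete the old room from the database
    "force_purge": False,  # don't purge while users are still joined
}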
@@ -106,6 +106,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         self.event_auth_handler = hs.get_event_auth_handler()
         self._worker_lock_handler = hs.get_worker_locks_handler()

+        self._membership_types_to_include_profile_data_in = {
+            Membership.JOIN,
+            Membership.KNOCK,
+        }
+        if self.hs.config.server.include_profile_data_on_invite:
+            self._membership_types_to_include_profile_data_in.add(Membership.INVITE)
+
         self.member_linearizer: Linearizer = Linearizer(name="member")
         self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")

@@ -785,9 +792,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         if (
             not self.allow_per_room_profiles and not is_requester_server_notices_user
         ) or requester.shadow_banned:
-            # Strip profile data, knowing that new profile data will be added to the
-            # event's content in event_creation_handler.create_event() using the target's
-            # global profile.
+            # Strip profile data, knowing that new profile data will be added to
+            # the event's content below using the target's global profile.
             content.pop("displayname", None)
             content.pop("avatar_url", None)

@@ -823,6 +829,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         if action in ["kick", "unban"]:
             effective_membership_state = "leave"

         if effective_membership_state not in Membership.LIST:
             raise SynapseError(400, "Invalid membership key")

+        # Add profile data for joins etc, if no per-room profile.
+        if (
+            effective_membership_state
+            in self._membership_types_to_include_profile_data_in
+        ):
+            # If event doesn't include a display name, add one.
+            profile = self.profile_handler
+
+            try:
+                if "displayname" not in content:
+                    displayname = await profile.get_displayname(target)
+                    if displayname is not None:
+                        content["displayname"] = displayname
+                if "avatar_url" not in content:
+                    avatar_url = await profile.get_avatar_url(target)
+                    if avatar_url is not None:
+                        content["avatar_url"] = avatar_url
+            except Exception as e:
+                logger.info("Failed to get profile information for %r: %s", target, e)
+
         # if this is a join with a 3pid signature, we may need to turn a 3pid
         # invite into a normal invite before we can handle the join.
         if third_party_signed is not None:
@@ -483,7 +483,6 @@ class SearchHandler:
             self._storage_controllers,
             user.to_string(),
             filtered_events,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         events.sort(key=lambda e: -rank_map[e.event_id])
@@ -585,7 +584,6 @@ class SearchHandler:
             self._storage_controllers,
             user.to_string(),
             filtered_events,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         room_events.extend(events)
@@ -673,14 +671,12 @@ class SearchHandler:
             self._storage_controllers,
             user.to_string(),
             res.events_before,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         events_after = await filter_events_for_client(
             self._storage_controllers,
             user.to_string(),
             res.events_after,
-            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         context: JsonDict = {
synapse/handlers/sliding_sync.py (new file, 1369 lines): diff suppressed because it is too large.
@@ -28,11 +28,14 @@ from typing import (
     Dict,
     FrozenSet,
     List,
+    Literal,
+    Mapping,
     Optional,
     Sequence,
     Set,
     Tuple,
     Union,
+    overload,
 )

 import attr

@@ -128,6 +131,8 @@ class SyncVersion(Enum):

     # Traditional `/sync` endpoint
     SYNC_V2 = "sync_v2"
+    # Part of MSC3575 Sliding Sync
+    E2EE_SYNC = "e2ee_sync"
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -279,6 +284,47 @@ class SyncResult:
             or self.device_lists
         )

+    @staticmethod
+    def empty(
+        next_batch: StreamToken,
+        device_one_time_keys_count: JsonMapping,
+        device_unused_fallback_key_types: List[str],
+    ) -> "SyncResult":
+        "Return a new empty result"
+        return SyncResult(
+            next_batch=next_batch,
+            presence=[],
+            account_data=[],
+            joined=[],
+            invited=[],
+            knocked=[],
+            archived=[],
+            to_device=[],
+            device_lists=DeviceListUpdates(),
+            device_one_time_keys_count=device_one_time_keys_count,
+            device_unused_fallback_key_types=device_unused_fallback_key_types,
+        )
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class E2eeSyncResult:
+    """
+    Attributes:
+        next_batch: Token for the next sync
+        to_device: List of direct messages for the device.
+        device_lists: List of user_ids whose devices have changed
+        device_one_time_keys_count: Dict of algorithm to count for one time keys
+            for this device
+        device_unused_fallback_key_types: List of key types that have an unused fallback
+            key
+    """
+
+    next_batch: StreamToken
+    to_device: List[JsonDict]
+    device_lists: DeviceListUpdates
+    device_one_time_keys_count: JsonMapping
+    device_unused_fallback_key_types: List[str]
 class SyncHandler:
     def __init__(self, hs: "HomeServer"):
@@ -322,6 +368,31 @@ class SyncHandler:

         self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync

+    @overload
+    async def wait_for_sync_for_user(
+        self,
+        requester: Requester,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.SYNC_V2],
+        request_key: SyncRequestKey,
+        since_token: Optional[StreamToken] = None,
+        timeout: int = 0,
+        full_state: bool = False,
+    ) -> SyncResult: ...
+
+    @overload
+    async def wait_for_sync_for_user(
+        self,
+        requester: Requester,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.E2EE_SYNC],
+        request_key: SyncRequestKey,
+        since_token: Optional[StreamToken] = None,
+        timeout: int = 0,
+        full_state: bool = False,
+    ) -> E2eeSyncResult: ...
+
+    @overload
     async def wait_for_sync_for_user(
         self,
         requester: Requester,
@@ -331,7 +402,18 @@ class SyncHandler:
         since_token: Optional[StreamToken] = None,
         timeout: int = 0,
         full_state: bool = False,
-    ) -> SyncResult:
+    ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+    async def wait_for_sync_for_user(
+        self,
+        requester: Requester,
+        sync_config: SyncConfig,
+        sync_version: SyncVersion,
+        request_key: SyncRequestKey,
+        since_token: Optional[StreamToken] = None,
+        timeout: int = 0,
+        full_state: bool = False,
+    ) -> Union[SyncResult, E2eeSyncResult]:
         """Get the sync for a client if we have new data for it now. Otherwise
         wait for new data to arrive on the server. If the timeout expires, then
         return an empty sync result.
@@ -344,8 +426,10 @@ class SyncHandler:
             since_token: The point in the stream to sync from.
             timeout: How long to wait for new data to arrive before giving up.
             full_state: Whether to return the full state for each room.

         Returns:
+            When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
+            When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
         """
         # If the user is not part of the mau group, then check that limits have
         # not been exceeded (if not part of the group by this point, almost certain
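Aside: the `@overload` stack above is the standard typing trick for narrowing a return type from a `Literal` enum argument; only the final, undecorated definition exists at runtime. A self-contained toy version of the same pattern:

from enum import Enum
from typing import Literal, Union, overload

class Version(Enum):
    V2 = "v2"
    E2EE = "e2ee"

@overload
def make_result(version: Literal[Version.V2]) -> str: ...
@overload
def make_result(version: Literal[Version.E2EE]) -> bytes: ...
@overload
def make_result(version: Version) -> Union[str, bytes]: ...

def make_result(version: Version) -> Union[str, bytes]:
    # Single runtime implementation; the overloads above are erased at
    # runtime and only guide the type checker.
    return "full result" if version is Version.V2 else b"e2ee result"

narrowed: str = make_result(Version.V2)  # mypy narrows the return type to str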
@@ -366,6 +450,29 @@ class SyncHandler:
             logger.debug("Returning sync response for %s", user_id)
             return res

+    @overload
+    async def _wait_for_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.SYNC_V2],
+        since_token: Optional[StreamToken],
+        timeout: int,
+        full_state: bool,
+        cache_context: ResponseCacheContext[SyncRequestKey],
+    ) -> SyncResult: ...
+
+    @overload
+    async def _wait_for_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.E2EE_SYNC],
+        since_token: Optional[StreamToken],
+        timeout: int,
+        full_state: bool,
+        cache_context: ResponseCacheContext[SyncRequestKey],
+    ) -> E2eeSyncResult: ...
+
+    @overload
     async def _wait_for_sync_for_user(
         self,
         sync_config: SyncConfig,
@@ -374,7 +481,17 @@ class SyncHandler:
         timeout: int,
         full_state: bool,
         cache_context: ResponseCacheContext[SyncRequestKey],
-    ) -> SyncResult:
+    ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+    async def _wait_for_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: SyncVersion,
+        since_token: Optional[StreamToken],
+        timeout: int,
+        full_state: bool,
+        cache_context: ResponseCacheContext[SyncRequestKey],
+    ) -> Union[SyncResult, E2eeSyncResult]:
         """The start of the machinery that produces a /sync response.

         See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
@@ -401,6 +518,45 @@ class SyncHandler:
         if context:
             context.tag = sync_label

+        if since_token is not None:
+            # We need to make sure this worker has caught up with the token. If
+            # this returns false it means we timed out waiting, and we should
+            # just return an empty response.
+            start = self.clock.time_msec()
+            if not await self.notifier.wait_for_stream_token(since_token):
+                logger.warning(
+                    "Timed out waiting for worker to catch up. Returning empty response"
+                )
+                device_id = sync_config.device_id
+                one_time_keys_count: JsonMapping = {}
+                unused_fallback_key_types: List[str] = []
+                if device_id:
+                    user_id = sync_config.user.to_string()
+                    # TODO: We should have a way to let clients differentiate between the states of:
+                    #   * no change in OTK count since the provided since token
+                    #   * the server has zero OTKs left for this device
+                    # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+                    one_time_keys_count = await self.store.count_e2e_one_time_keys(
+                        user_id, device_id
+                    )
+                    unused_fallback_key_types = list(
+                        await self.store.get_e2e_unused_fallback_key_types(
+                            user_id, device_id
+                        )
+                    )
+
+                cache_context.should_cache = False  # Don't cache empty responses
+                return SyncResult.empty(
+                    since_token, one_time_keys_count, unused_fallback_key_types
+                )
+
+            # If we've spent significant time waiting to catch up, take it off
+            # the timeout.
+            now = self.clock.time_msec()
+            if now - start > 1_000:
+                timeout -= now - start
+                timeout = max(timeout, 0)
+
         # if we have a since token, delete any to-device messages before that token
         # (since we now know that the device has received them)
         if since_token is not None:
@@ -417,14 +573,16 @@ class SyncHandler:
         if timeout == 0 or since_token is None or full_state:
             # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
-            result: SyncResult = await self.current_sync_for_user(
-                sync_config, sync_version, since_token, full_state=full_state
+            result: Union[SyncResult, E2eeSyncResult] = (
+                await self.current_sync_for_user(
+                    sync_config, sync_version, since_token, full_state=full_state
+                )
             )
         else:
             # Otherwise, we wait for something to happen and report it to the user.
             async def current_sync_callback(
                 before_token: StreamToken, after_token: StreamToken
-            ) -> SyncResult:
+            ) -> Union[SyncResult, E2eeSyncResult]:
                 return await self.current_sync_for_user(
                     sync_config, sync_version, since_token
                 )
@@ -456,14 +614,43 @@ class SyncHandler:

             return result

+    @overload
+    async def current_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.SYNC_V2],
+        since_token: Optional[StreamToken] = None,
+        full_state: bool = False,
+    ) -> SyncResult: ...
+
+    @overload
+    async def current_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: Literal[SyncVersion.E2EE_SYNC],
+        since_token: Optional[StreamToken] = None,
+        full_state: bool = False,
+    ) -> E2eeSyncResult: ...
+
+    @overload
     async def current_sync_for_user(
         self,
         sync_config: SyncConfig,
         sync_version: SyncVersion,
         since_token: Optional[StreamToken] = None,
         full_state: bool = False,
-    ) -> SyncResult:
-        """Generates the response body of a sync result, represented as a SyncResult.
+    ) -> Union[SyncResult, E2eeSyncResult]: ...
+
+    async def current_sync_for_user(
+        self,
+        sync_config: SyncConfig,
+        sync_version: SyncVersion,
+        since_token: Optional[StreamToken] = None,
+        full_state: bool = False,
+    ) -> Union[SyncResult, E2eeSyncResult]:
+        """
+        Generates the response body of a sync result, represented as a
+        `SyncResult`/`E2eeSyncResult`.

         This is a wrapper around `generate_sync_result` which starts an open tracing
         span to track the sync. See `generate_sync_result` for the next part of your
@@ -474,15 +661,25 @@ class SyncHandler:
             sync_version: Determines what kind of sync response to generate.
             since_token: The point in the stream to sync from.p.
             full_state: Whether to return the full state for each room.
+
+        Returns:
+            When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
+            When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
         """
         with start_active_span("sync.current_sync_for_user"):
             log_kv({"since_token": since_token})

             # Go through the `/sync` v2 path
             if sync_version == SyncVersion.SYNC_V2:
-                sync_result: SyncResult = await self.generate_sync_result(
-                    sync_config, since_token, full_state
-                )
+                sync_result: Union[SyncResult, E2eeSyncResult] = (
+                    await self.generate_sync_result(
+                        sync_config, since_token, full_state
+                    )
+                )
+            # Go through the MSC3575 Sliding Sync `/sync/e2ee` path
+            elif sync_version == SyncVersion.E2EE_SYNC:
+                sync_result = await self.generate_e2ee_sync_result(
+                    sync_config, since_token
+                )
             else:
                 raise Exception(
@@ -647,7 +844,6 @@ class SyncHandler:
                 sync_config.user.to_string(),
                 recents,
                 always_include_ids=current_state_ids,
-                msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
             )
             log_kv({"recents_after_visibility_filtering": len(recents)})
         else:
@@ -733,7 +929,6 @@ class SyncHandler:
                     sync_config.user.to_string(),
                     loaded_recents,
                     always_include_ids=current_state_ids,
-                    msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
                 )

                 loaded_recents = []
@@ -784,89 +979,6 @@ class SyncHandler:
             bundled_aggregations=bundled_aggregations,
         )

-    async def get_state_after_event(
-        self,
-        event_id: str,
-        state_filter: Optional[StateFilter] = None,
-        await_full_state: bool = True,
-    ) -> StateMap[str]:
-        """
-        Get the room state after the given event
-
-        Args:
-            event_id: event of interest
-            state_filter: The state filter used to fetch state from the database.
-            await_full_state: if `True`, will block if we do not yet have complete state
-                at the event and `state_filter` is not satisfied by partial state.
-                Defaults to `True`.
-        """
-        state_ids = await self._state_storage_controller.get_state_ids_for_event(
-            event_id,
-            state_filter=state_filter or StateFilter.all(),
-            await_full_state=await_full_state,
-        )
-
-        # using get_metadata_for_events here (instead of get_event) sidesteps an issue
-        # with redactions: if `event_id` is a redaction event, and we don't have the
-        # original (possibly because it got purged), get_event will refuse to return
-        # the redaction event, which isn't terribly helpful here.
-        #
-        # (To be fair, in that case we could assume it's *not* a state event, and
-        # therefore we don't need to worry about it. But still, it seems cleaner just
-        # to pull the metadata.)
-        m = (await self.store.get_metadata_for_events([event_id]))[event_id]
-        if m.state_key is not None and m.rejection_reason is None:
-            state_ids = dict(state_ids)
-            state_ids[(m.event_type, m.state_key)] = event_id
-
-        return state_ids
-
-    async def get_state_at(
-        self,
-        room_id: str,
-        stream_position: StreamToken,
-        state_filter: Optional[StateFilter] = None,
-        await_full_state: bool = True,
-    ) -> StateMap[str]:
-        """Get the room state at a particular stream position
-
-        Args:
-            room_id: room for which to get state
-            stream_position: point at which to get state
-            state_filter: The state filter used to fetch state from the database.
-            await_full_state: if `True`, will block if we do not yet have complete state
-                at the last event in the room before `stream_position` and
-                `state_filter` is not satisfied by partial state. Defaults to `True`.
-        """
-        # FIXME: This gets the state at the latest event before the stream ordering,
-        # which might not be the same as the "current state" of the room at the time
-        # of the stream token if there were multiple forward extremities at the time.
-        last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
-            room_id,
-            end_token=stream_position.room_key,
-        )
-
-        if last_event_id:
-            state = await self.get_state_after_event(
-                last_event_id,
-                state_filter=state_filter or StateFilter.all(),
-                await_full_state=await_full_state,
-            )
-        else:
-            # no events in this room - so presumably no state
-            state = {}
-
-            # (erikj) This should be rarely hit, but we've had some reports that
-            # we get more state down gappy syncs than we should, so let's add
-            # some logging.
-            logger.info(
-                "Failed to find any events in room %s at %s",
-                room_id,
-                stream_position.room_key,
-            )
-        return state
     async def compute_summary(
         self,
         room_id: str,
@@ -1240,7 +1352,7 @@ class SyncHandler:
                 await_full_state = True
                 lazy_load_members = False

-            state_at_timeline_end = await self.get_state_at(
+            state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
                 room_id,
                 stream_position=end_token,
                 state_filter=state_filter,
@@ -1324,7 +1436,7 @@ class SyncHandler:
             # We need to make sure the first event in our batch points to the
             # last event in the previous batch.
             last_event_id_prev_batch = (
-                await self.store.get_last_event_in_room_before_stream_ordering(
+                await self.store.get_last_event_id_in_room_before_stream_ordering(
                     room_id,
                     end_token=since_token.room_key,
                 )
@@ -1368,11 +1480,13 @@ class SyncHandler:
             else:
                 # We can get here if the user has ignored the senders of all
                 # the recent events.
-                state_at_timeline_start = await self.get_state_at(
-                    room_id,
-                    stream_position=end_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
+                state_at_timeline_start = (
+                    await self._state_storage_controller.get_state_ids_at(
+                        room_id,
+                        stream_position=end_token,
+                        state_filter=state_filter,
+                        await_full_state=await_full_state,
+                    )
                 )

         if batch.limited:
@@ -1390,14 +1504,14 @@ class SyncHandler:
             # about them).
             state_filter = StateFilter.all()

-        state_at_previous_sync = await self.get_state_at(
+        state_at_previous_sync = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=since_token,
             state_filter=state_filter,
             await_full_state=await_full_state,
         )

-        state_at_timeline_end = await self.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -1691,6 +1805,96 @@ class SyncHandler:
             next_batch=sync_result_builder.now_token,
         )

+    async def generate_e2ee_sync_result(
+        self,
+        sync_config: SyncConfig,
+        since_token: Optional[StreamToken] = None,
+    ) -> E2eeSyncResult:
+        """
+        Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.
+
+        This is represented by a `E2eeSyncResult` struct, which is built from small
+        pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
+        mutable ("inout") parameter to various helper functions. These retrieve and
+        process the data which forms the sync body, often writing to the
+        `sync_result_builder` to store their output.
+
+        At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
+        instance to signify that the sync calculation is complete.
+        """
+        user_id = sync_config.user.to_string()
+        app_service = self.store.get_app_service_by_user_id(user_id)
+        if app_service:
+            # We no longer support AS users using /sync directly.
+            # See https://github.com/matrix-org/matrix-doc/issues/1144
+            raise NotImplementedError()
+
+        sync_result_builder = await self.get_sync_result_builder(
+            sync_config,
+            since_token,
+            full_state=False,
+        )
+
+        # 1. Calculate `to_device` events
+        await self._generate_sync_entry_for_to_device(sync_result_builder)
+
+        # 2. Calculate `device_lists`
+        # Device list updates are sent if a since token is provided.
+        device_lists = DeviceListUpdates()
+        include_device_list_updates = bool(since_token and since_token.device_list_key)
+        if include_device_list_updates:
+            # Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
+            # is used in calculate_user_changes below.
+            #
+            # TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
+            # figure out the membership changes/derived info needed for
+            # `_generate_sync_entry_for_device_list()`. In the future, we should try to
+            # refactor this away.
+            (
+                newly_joined_rooms,
+                newly_left_rooms,
+            ) = await self._generate_sync_entry_for_rooms(sync_result_builder)
+
+            # This uses the sync_result_builder.joined which is set in
+            # `_generate_sync_entry_for_rooms`, if that didn't find any joined
+            # rooms for some reason it is a no-op.
+            (
+                newly_joined_or_invited_or_knocked_users,
+                newly_left_users,
+            ) = sync_result_builder.calculate_user_changes()
+
+            device_lists = await self._generate_sync_entry_for_device_list(
+                sync_result_builder,
+                newly_joined_rooms=newly_joined_rooms,
+                newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
+                newly_left_rooms=newly_left_rooms,
+                newly_left_users=newly_left_users,
+            )
+
+        # 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
+        device_id = sync_config.device_id
+        one_time_keys_count: JsonMapping = {}
+        unused_fallback_key_types: List[str] = []
+        if device_id:
+            # TODO: We should have a way to let clients differentiate between the states of:
+            #   * no change in OTK count since the provided since token
+            #   * the server has zero OTKs left for this device
+            # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+            one_time_keys_count = await self.store.count_e2e_one_time_keys(
+                user_id, device_id
+            )
+            unused_fallback_key_types = list(
+                await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
+            )
+
+        return E2eeSyncResult(
+            to_device=sync_result_builder.to_device,
+            device_lists=device_lists,
+            device_one_time_keys_count=one_time_keys_count,
+            device_unused_fallback_key_types=unused_fallback_key_types,
+            next_batch=sync_result_builder.now_token,
+        )
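Aside: the docstring above describes a mutable builder being passed "inout" to helpers and then frozen into a result at the end. The same shape in miniature (toy dataclasses, not Synapse's types):

from dataclasses import dataclass, field
from typing import List

@dataclass
class ResultBuilder:
    to_device: List[dict] = field(default_factory=list)

@dataclass(frozen=True)
class Result:
    to_device: List[dict]

def fill_to_device(builder: ResultBuilder) -> None:
    # Helpers receive the builder "inout" and write their piece into it.
    builder.to_device.append({"type": "m.room_key_request"})

builder = ResultBuilder()
fill_to_device(builder)
result = Result(to_device=builder.to_device)  # freeze once the calculation is done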
     async def get_sync_result_builder(
         self,
         sync_config: SyncConfig,
@@ -1715,7 +1919,7 @@ class SyncHandler:
         """
         user_id = sync_config.user.to_string()

-        # Note: we get the users room list *before* we get the current token, this
+        # Note: we get the users room list *before* we get the `now_token`, this
         # avoids checking back in history if rooms are joined after the token is fetched.
         token_before_rooms = self.event_sources.get_current_token()
         mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
@@ -1727,10 +1931,10 @@ class SyncHandler:
         now_token = self.event_sources.get_current_token()
         log_kv({"now_token": now_token})

-        # Since we fetched the users room list before the token, there's a small window
-        # during which membership events may have been persisted, so we fetch these now
-        # and modify the joined room list for any changes between the get_rooms_for_user
-        # call and the get_current_token call.
+        # Since we fetched the users room list before calculating the `now_token` (see
+        # above), there's a small window during which membership events may have been
+        # persisted, so we fetch these now and modify the joined room list for any
+        # changes between the get_rooms_for_user call and the get_current_token call.
         membership_change_events = []
         if since_token:
             membership_change_events = await self.store.get_membership_changes_for_user(
@@ -1740,16 +1944,19 @@ class SyncHandler:
             self.rooms_to_exclude_globally,
         )

-        mem_last_change_by_room_id: Dict[str, EventBase] = {}
+        last_membership_change_by_room_id: Dict[str, EventBase] = {}
         for event in membership_change_events:
-            mem_last_change_by_room_id[event.room_id] = event
+            last_membership_change_by_room_id[event.room_id] = event

         # For the latest membership event in each room found, add/remove the room ID
         # from the joined room list accordingly. In this case we only care if the
         # latest change is JOIN.
-        for room_id, event in mem_last_change_by_room_id.items():
+        for room_id, event in last_membership_change_by_room_id.items():
             assert event.internal_metadata.stream_ordering
+            # As a shortcut, skip any events that happened before we got our
+            # `get_rooms_for_user()` snapshot (any changes are already represented
+            # in that list).
             if (
                 event.internal_metadata.stream_ordering
                 < token_before_rooms.room_key.stream
@@ -1889,7 +2096,7 @@ class SyncHandler:
             users_that_have_changed = (
                 await self._device_handler.get_device_changes_in_shared_rooms(
                     user_id,
-                    sync_result_builder.joined_room_ids,
+                    joined_room_ids,
                     from_token=since_token,
                     now_token=sync_result_builder.now_token,
                 )
@@ -2303,7 +2510,7 @@ class SyncHandler:
                 continue

             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self.get_state_at(
+                old_state_ids = await self._state_storage_controller.get_state_ids_at(
                     room_id,
                     since_token,
                     state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2333,12 +2540,14 @@ class SyncHandler:
                     newly_left_rooms.append(room_id)
             else:
                 if not old_state_ids:
-                    old_state_ids = await self.get_state_at(
-                        room_id,
-                        since_token,
-                        state_filter=StateFilter.from_types(
-                            [(EventTypes.Member, user_id)]
-                        ),
+                    old_state_ids = (
+                        await self._state_storage_controller.get_state_ids_at(
+                            room_id,
+                            since_token,
+                            state_filter=StateFilter.from_types(
+                                [(EventTypes.Member, user_id)]
+                            ),
+                        )
                     )
                     old_mem_ev_id = old_state_ids.get(
                         (EventTypes.Member, user_id), None
@@ -2543,7 +2752,7 @@ class SyncHandler:
                 continue

             leave_token = now_token.copy_and_replace(
-                StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering)
+                StreamKeyType.ROOM, RoomStreamToken(stream=event.event_pos.stream)
             )
             room_entries.append(
                 RoomSyncResultBuilder(
@@ -477,9 +477,9 @@ class TypingWriterHandler(FollowerTypingHandler):

         rows = []
         for room_id in changed_rooms:
-            serial = self._room_serials[room_id]
-            if last_id < serial <= current_id:
-                typing = self._room_typing[room_id]
+            serial = self._room_serials.get(room_id)
+            if serial and last_id < serial <= current_id:
+                typing = self._room_typing.get(room_id, set())
                 rows.append((serial, [room_id, list(typing)]))
         rows.sort()
@@ -35,6 +35,8 @@ from typing import (
     Union,
 )

+import attr
+import multipart
 import treq
 from canonicaljson import encode_canonical_json
 from netaddr import AddrFormatError, IPAddress, IPSet
@@ -1006,6 +1008,130 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
         self._maybe_fail()


+@attr.s(auto_attribs=True, slots=True)
+class MultipartResponse:
+    """
+    A small class to hold parsed values of a multipart response.
+    """
+
+    json: bytes = b"{}"
+    length: Optional[int] = None
+    content_type: Optional[bytes] = None
+    disposition: Optional[bytes] = None
+    url: Optional[bytes] = None
+
+
+class _MultipartParserProtocol(protocol.Protocol):
+    """
+    Protocol to read and parse a MSC3916 multipart/mixed response
+    """
+
+    transport: Optional[ITCPTransport] = None
+
+    def __init__(
+        self,
+        stream: ByteWriteable,
+        deferred: defer.Deferred,
+        boundary: str,
+        max_length: Optional[int],
+    ) -> None:
+        self.stream = stream
+        self.deferred = deferred
+        self.boundary = boundary
+        self.max_length = max_length
+        self.parser = None
+        self.multipart_response = MultipartResponse()
+        self.has_redirect = False
+        self.in_json = False
+        self.json_done = False
+        self.file_length = 0
+        self.total_length = 0
+        self.in_disposition = False
+        self.in_content_type = False
+
+    def dataReceived(self, incoming_data: bytes) -> None:
+        if self.deferred.called:
+            return
+
+        # we don't have a parser yet, instantiate it
+        if not self.parser:
+
+            def on_header_field(data: bytes, start: int, end: int) -> None:
+                if data[start:end] == b"Location":
+                    self.has_redirect = True
+                if data[start:end] == b"Content-Disposition":
+                    self.in_disposition = True
+                if data[start:end] == b"Content-Type":
+                    self.in_content_type = True
+
+            def on_header_value(data: bytes, start: int, end: int) -> None:
+                # the first header should be content-type for application/json
+                if not self.in_json and not self.json_done:
+                    assert data[start:end] == b"application/json"
+                    self.in_json = True
+                elif self.has_redirect:
+                    self.multipart_response.url = data[start:end]
+                elif self.in_content_type:
+                    self.multipart_response.content_type = data[start:end]
+                    self.in_content_type = False
+                elif self.in_disposition:
+                    self.multipart_response.disposition = data[start:end]
+                    self.in_disposition = False
+
+            def on_part_data(data: bytes, start: int, end: int) -> None:
+                # we've seen json header but haven't written the json data
+                if self.in_json and not self.json_done:
+                    self.multipart_response.json = data[start:end]
+                    self.json_done = True
+                # we have a redirect header rather than a file, and have already captured it
+                elif self.has_redirect:
+                    return
+                # otherwise we are in the file part
+                else:
+                    logger.info("Writing multipart file data to stream")
+                    try:
+                        self.stream.write(data[start:end])
+                    except Exception as e:
+                        logger.warning(
+                            f"Exception encountered writing file data to stream: {e}"
+                        )
+                        self.deferred.errback()
+                    self.file_length += end - start
+
+            callbacks = {
+                "on_header_field": on_header_field,
+                "on_header_value": on_header_value,
+                "on_part_data": on_part_data,
+            }
+            self.parser = multipart.MultipartParser(self.boundary, callbacks)
+
+        self.total_length += len(incoming_data)
+        if self.max_length is not None and self.total_length >= self.max_length:
+            self.deferred.errback(BodyExceededMaxSize())
+            # Close the connection (forcefully) since all the data will get
+            # discarded anyway.
+            assert self.transport is not None
+            self.transport.abortConnection()
+
+        try:
+            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+        except Exception as e:
+            logger.warning(f"Exception writing to multipart parser: {e}")
+            self.deferred.errback()
+            return
+
+    def connectionLost(self, reason: Failure = connectionDone) -> None:
+        # If the maximum size was already exceeded, there's nothing to do.
+        if self.deferred.called:
+            return
+
+        if reason.check(ResponseDone):
+            self.multipart_response.length = self.file_length
+            self.deferred.callback(self.multipart_response)
+        else:
+            self.deferred.errback(reason)
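Aside: the callback wiring above is the push-style API of the python-multipart package (imported as `multipart`, exactly as the hunk does): register `on_*` callbacks, then feed raw chunks into `parser.write()` as they arrive. A standalone sketch on a canned body (boundary and payload invented for the demo):

import multipart
from typing import List

boundary = "abc123"
body = (
    b"--abc123\r\n"
    b"Content-Type: application/json\r\n"
    b"\r\n"
    b'{"ok": true}\r\n'
    b"--abc123\r\n"
    b"Content-Type: text/plain\r\n"
    b"\r\n"
    b"file bytes here\r\n"
    b"--abc123--\r\n"
)

parts: List[bytes] = []

def on_part_data(data: bytes, start: int, end: int) -> None:
    # Callbacks receive (buffer, start, end) slices rather than copies.
    parts.append(data[start:end])

parser = multipart.MultipartParser(boundary, {"on_part_data": on_part_data})
parser.write(body)  # may be called repeatedly as chunks stream in
print(parts)  # e.g. [b'{"ok": true}', b'file bytes here'] (parts may arrive split)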
 class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""

@@ -1091,6 +1217,32 @@ def read_body_with_max_size(
     return d


+def read_multipart_response(
+    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
+) -> "defer.Deferred[MultipartResponse]":
+    """
+    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
+    the stream passed in and returning a deferred resolving to a MultipartResponse
+
+    Args:
+        response: The HTTP response to read from.
+        stream: The file-object to write to.
+        boundary: the multipart/mixed boundary string
+        max_length: maximum allowable length of the response
+    """
+    d: defer.Deferred[MultipartResponse] = defer.Deferred()
+
+    # If the Content-Length header gives a size larger than the maximum allowed
+    # size, do not bother downloading the body.
+    if max_length is not None and response.length != UNKNOWN_LENGTH:
+        if response.length > max_length:
+            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
+            return d
+
+    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
+    return d
 def encode_query_args(args: Optional[QueryParams]) -> bytes:
     """
     Encodes a map of query arguments to bytes which can be appended to a URL.

@@ -57,7 +57,7 @@ from twisted.internet.interfaces import IReactorTime
 from twisted.internet.task import Cooperator
 from twisted.web.client import ResponseFailed
 from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IResponse
+from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse

 import synapse.metrics
 import synapse.util.retryutils

@@ -68,15 +68,18 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
+from synapse.api.ratelimiting import Ratelimiter
 from synapse.crypto.context_factory import FederationPolicyForHTTPS
 from synapse.http import QuieterFileBodyProducer
 from synapse.http.client import (
     BlocklistingAgentWrapper,
     BodyExceededMaxSize,
+    ByteWriteable,
     SimpleHttpClient,
     _make_scheduler,
     encode_query_args,
     read_body_with_max_size,
+    read_multipart_response,
 )
 from synapse.http.connectproxyclient import BearerProxyCredentials
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
@@ -465,6 +468,13 @@ class MatrixFederationHttpClient:

         self._sleeper = AwakenableSleeper(self.reactor)

+        self._simple_http_client = SimpleHttpClient(
+            hs,
+            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
+            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
+            use_proxy=True,
+        )
+
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""
@@ -1411,9 +1421,11 @@ class MatrixFederationHttpClient:
         destination: str,
         path: str,
         output_stream: BinaryIO,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+        max_size: int,
         args: Optional[QueryParams] = None,
         retry_on_dns_fail: bool = True,
-        max_size: Optional[int] = None,
         ignore_backoff: bool = False,
         follow_redirects: bool = False,
     ) -> Tuple[int, Dict[bytes, List[bytes]]]:
@@ -1422,6 +1434,10 @@ class MatrixFederationHttpClient:
             destination: The remote server to send the HTTP request to.
             path: The HTTP path to GET.
             output_stream: File to write the response body to.
+            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
+                requester IP
+            ip_address: IP address of the requester
+            max_size: maximum allowable size in bytes of the file
             args: Optional dictionary used to create the query string.
             ignore_backoff: true to ignore the historical backoff data
                 and try the request anyway.
@@ -1441,11 +1457,27 @@ class MatrixFederationHttpClient:
                 federation whitelist
             RequestSendFailed: If there were problems connecting to the
                 remote, due to e.g. DNS failures, connection timeouts etc.
+            SynapseError: If the requested file exceeds ratelimits
         """
         request = MatrixFederationRequest(
             method="GET", destination=destination, path=path, query=args
         )

+        # check for a minimum balance of 1MiB in ratelimiter before initiating request
+        send_req, _ = await download_ratelimiter.can_do_action(
+            requester=None, key=ip_address, n_actions=1048576, update=False
+        )
+
+        if not send_req:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
         response = await self._send_request(
             request,
             retry_on_dns_fail=retry_on_dns_fail,
@@ -1455,12 +1487,36 @@ class MatrixFederationHttpClient:

         headers = dict(response.headers.getAllRawHeaders())

+        expected_size = response.length
+        # if we don't get an expected length then use the max length
+        if expected_size == UNKNOWN_LENGTH:
+            expected_size = max_size
+            logger.debug(
+                f"File size unknown, assuming file is max allowable size: {max_size}"
+            )
+
+        read_body, _ = await download_ratelimiter.can_do_action(
+            requester=None,
+            key=ip_address,
+            n_actions=expected_size,
+        )
+        if not read_body:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
         try:
-            d = read_body_with_max_size(response, output_stream, max_size)
+            # add a byte of headroom to max size as function errs at >=
+            d = read_body_with_max_size(response, output_stream, expected_size + 1)
             d.addTimeout(self.default_timeout_seconds, self.reactor)
             length = await make_deferred_yieldable(d)
         except BodyExceededMaxSize:
-            msg = "Requested file is too large > %r bytes" % (max_size,)
+            msg = "Requested file is too large > %r bytes" % (expected_size,)
             logger.warning(
                 "{%s} [%s] %s",
                 request.txn_id,
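Aside: both ratelimiter calls above follow a peek-then-charge pattern: first check, with `update=False`, that at least 1 MiB of budget remains before making the request at all, then charge the expected body size once it is known. A toy bucket with the same semantics (not Synapse's `Ratelimiter`):

class Bucket:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.balance = capacity

    def can_do_action(self, n_actions: int, update: bool = True) -> bool:
        allowed = self.balance >= n_actions
        if allowed and update:
            self.balance -= n_actions  # only spend budget when update=True
        return allowed

bucket = Bucket(capacity=10 * 1024 * 1024)
assert bucket.can_do_action(1 << 20, update=False)  # peek: no budget spent
assert bucket.can_do_action(5 << 20)                # charge the expected size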
@@ -1506,6 +1562,189 @@ class MatrixFederationHttpClient:
|
||||
)
|
||||
return length, headers
|
||||
|
||||
async def federation_get_file(
|
||||
self,
|
||||
destination: str,
|
||||
path: str,
|
||||
output_stream: BinaryIO,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
max_size: int,
|
||||
args: Optional[QueryParams] = None,
|
||||
retry_on_dns_fail: bool = True,
|
||||
ignore_backoff: bool = False,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
|
||||
"""GETs a file from a given homeserver over the federation /download endpoint
|
||||
Args:
|
||||
destination: The remote server to send the HTTP request to.
|
||||
path: The HTTP path to GET.
|
||||
output_stream: File to write the response body to.
|
||||
download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
|
||||
requester IP
|
||||
ip_address: IP address of the requester
|
||||
max_size: maximum allowable size in bytes of the file
|
||||
args: Optional dictionary used to create the query string.
|
||||
ignore_backoff: true to ignore the historical backoff data
|
||||
and try the request anyway.
|
||||
|
||||
Returns:
|
||||
Resolves to an (int, dict, bytes) tuple of
|
||||
the file length, a dict of the response headers, and the file json
|
||||
|
||||
Raises:
|
||||
HttpResponseException: If we get an HTTP response code >= 300
|
||||
(except 429).
|
||||
NotRetryingDestination: If we are not yet ready to retry this
|
||||
server.
|
||||
FederationDeniedError: If this destination is not on our
|
||||
federation whitelist
|
||||
RequestSendFailed: If there were problems connecting to the
|
||||
remote, due to e.g. DNS failures, connection timeouts etc.
|
||||
SynapseError: If the requested file exceeds ratelimits or the response from the
|
||||
remote server is not a multipart response
|
||||
AssertionError: if the resolved multipart response's length is None
|
||||
"""
|
||||
request = MatrixFederationRequest(
|
||||
method="GET", destination=destination, path=path, query=args
|
||||
)
|
||||
|
||||
# check for a minimum balance of 1MiB in ratelimiter before initiating request
        send_req, _ = await download_ratelimiter.can_do_action(
            requester=None, key=ip_address, n_actions=1048576, update=False
        )

        if not send_req:
            msg = "Requested file size exceeds ratelimits"
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)

        response = await self._send_request(
            request,
            retry_on_dns_fail=retry_on_dns_fail,
            ignore_backoff=ignore_backoff,
        )

        headers = dict(response.headers.getAllRawHeaders())

        expected_size = response.length
        # if we don't get an expected length then use the max length
        if expected_size == UNKNOWN_LENGTH:
            expected_size = max_size
            logger.debug(
                f"File size unknown, assuming file is max allowable size: {max_size}"
            )

        read_body, _ = await download_ratelimiter.can_do_action(
            requester=None,
            key=ip_address,
            n_actions=expected_size,
        )
        if not read_body:
            msg = "Requested file size exceeds ratelimits"
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)

        # this should be a multipart/mixed response with the boundary string in the header
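        # e.g. for a well-formed response header of
        #     Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a
        # splitting on "boundary=" yields "6067d4698f8d40a0a794ea7d7379d53a"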
        try:
            raw_content_type = headers.get(b"Content-Type")
            assert raw_content_type is not None
            content_type = raw_content_type[0].decode("UTF-8")
            content_type_parts = content_type.split("boundary=")
            boundary = content_type_parts[1]
        except Exception:
            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)

        try:
            # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
            deferred = read_multipart_response(
                response, output_stream, boundary, expected_size + 1
            )
            deferred.addTimeout(self.default_timeout_seconds, self.reactor)
        except BodyExceededMaxSize:
            msg = "Requested file is too large > %r bytes" % (expected_size,)
            logger.warning(
                "{%s} [%s] %s",
                request.txn_id,
                request.destination,
                msg,
            )
            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
        except defer.TimeoutError as e:
            logger.warning(
                "{%s} [%s] Timed out reading response - %s %s",
                request.txn_id,
                request.destination,
                request.method,
                request.uri.decode("ascii"),
            )
            raise RequestSendFailed(e, can_retry=True) from e
        except ResponseFailed as e:
            logger.warning(
                "{%s} [%s] Failed to read response - %s %s",
                request.txn_id,
                request.destination,
                request.method,
                request.uri.decode("ascii"),
            )
            raise RequestSendFailed(e, can_retry=True) from e
        except Exception as e:
            logger.warning(
                "{%s} [%s] Error reading response: %s",
                request.txn_id,
                request.destination,
                e,
            )
            raise

        multipart_response = await make_deferred_yieldable(deferred)
        if not multipart_response.url:
            assert multipart_response.length is not None
            length = multipart_response.length
            headers[b"Content-Type"] = [multipart_response.content_type]
            headers[b"Content-Disposition"] = [multipart_response.disposition]

        # the response contained a redirect url to download the file from
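        # (i.e. the multipart response carried a pointer, typically a CDN URL,
        # rather than the file bytes themselves; fetch the file from that URL with
        # the plain HTTP client, still capped at `expected_size`.)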
        else:
            str_url = multipart_response.url.decode("utf-8")
            logger.info(
                "{%s} [%s] File download redirected, now downloading from: %s",
                request.txn_id,
                request.destination,
                str_url,
            )
            length, headers, _, _ = await self._simple_http_client.get_file(
                str_url, output_stream, expected_size
            )

        logger.info(
            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
            request.txn_id,
            request.destination,
            response.code,
            response.phrase.decode("ascii", errors="replace"),
            length,
            request.method,
            request.uri.decode("ascii"),
        )
        return length, headers, multipart_response.json
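
    # A minimal usage sketch (illustrative only: `client`, `limiter`, the media ID
    # and the size cap below are assumptions, not part of this diff):
    #
    #     with open(tmp_path, "wb") as output:
    #         length, headers, json_bytes = await client.federation_get_file(
    #             destination="remote.example.org",
    #             path="/_matrix/federation/v1/media/download/AbCdEf123456",
    #             output_stream=output,
    #             download_ratelimiter=limiter,
    #             ip_address="203.0.113.5",
    #             max_size=100 * 1024 * 1024,
    #         )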


def _flatten_response_never_received(e: BaseException) -> str:
    if hasattr(e, "reasons"):

@@ -119,14 +119,15 @@ def parse_integer(
        default: value to use if the parameter is absent, defaults to None.
        required: whether to raise a 400 SynapseError if the parameter is absent,
            defaults to False.
-       negative: whether to allow negative integers, defaults to True.
+       negative: whether to allow negative integers, defaults to False (disallowing
+           negatives).

    Returns:
        An int value or the default.

    Raises:
        SynapseError: if the parameter is absent and required, if the
            parameter is present and not an integer, or if the
-           parameter is illegitimate negative.
+           parameter is illegitimately negative.
    """
    args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
    return parse_integer_from_args(args, name, default, required, negative)
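
# A minimal sketch of the behaviour change (the handler and query below are
# hypothetical, not part of this diff): with the new default of negative=False,
#
#     limit = parse_integer(request, "limit", default=10)
#
# now raises a 400 SynapseError for a query string of ?limit=-5, where it
# previously returned -5; callers that accept negatives must pass negative=True.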
@@ -164,7 +165,7 @@ def parse_integer_from_args(
    name: str,
    default: Optional[int] = None,
    required: bool = False,
-   negative: bool = True,
+   negative: bool = False,
) -> Optional[int]:
    """Parse an integer parameter from the request string

@@ -174,7 +175,8 @@ def parse_integer_from_args(
        default: value to use if the parameter is absent, defaults to None.
        required: whether to raise a 400 SynapseError if the parameter is absent,
            defaults to False.
-       negative: whether to allow negative integers, defaults to True.
+       negative: whether to allow negative integers, defaults to False (disallowing
+           negatives).

    Returns:
        An int value or the default.
@@ -182,7 +184,7 @@ def parse_integer_from_args(
    Raises:
        SynapseError: if the parameter is absent and required, if the
            parameter is present and not an integer, or if the
-           parameter is illegitimate negative.
+           parameter is illegitimately negative.
    """
    name_bytes = name.encode("ascii")

@@ -25,7 +25,16 @@ import os
import urllib
from abc import ABC, abstractmethod
from types import TracebackType
-from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
+from typing import (
+    TYPE_CHECKING,
+    Awaitable,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Tuple,
+    Type,
+)

import attr

@@ -37,8 +46,13 @@ from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
+from synapse.util import Clock
from synapse.util.stringutils import is_ascii

+if TYPE_CHECKING:
+    from synapse.storage.databases.main.media_repository import LocalMedia


logger = logging.getLogger(__name__)

# list all text content types that will have the charset default to UTF-8 when
@@ -207,6 +221,7 @@ def add_file_headers(
    # select private. don't bother setting Expires as all our
    # clients are smart enough to be happy with Cache-Control
    request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")

    if file_size is not None:
        request.setHeader(b"Content-Length", b"%d" % (file_size,))

@@ -260,6 +275,93 @@ def _can_encode_filename_as_token(x: str) -> bool:
    return True


async def respond_with_multipart_responder(
    clock: Clock,
    request: SynapseRequest,
    responder: "Optional[Responder]",
    media_info: "LocalMedia",
) -> None:
    """
    Responds to requests originating from the federation media `/download` endpoint by
    streaming a multipart/mixed response

    Args:
        clock: the homeserver Clock
        request: the federation request to respond to
        responder: the responder which will send the response
        media_info: metadata about the media item
    """
    if not responder:
        respond_404(request)
        return

    # If we have a responder we *must* use it as a context manager.
    with responder:
        if request._disconnected:
            logger.warning(
                "Not sending response to request %s, already disconnected.", request
            )
            return

        if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
            disposition = "inline"
        else:
            disposition = "attachment"

        def _quote(x: str) -> str:
            return urllib.parse.quote(x.encode("utf-8"))

        if media_info.upload_name:
            if _can_encode_filename_as_token(media_info.upload_name):
                disposition = "%s; filename=%s" % (
                    disposition,
                    media_info.upload_name,
                )
            else:
                disposition = "%s; filename*=utf-8''%s" % (
                    disposition,
                    _quote(media_info.upload_name),
                )
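                # e.g. an upload_name of "résumé.txt" is not a valid token, so it is
                # sent RFC 5987-encoded: filename*=utf-8''r%C3%A9sum%C3%A9.txt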

        from synapse.media.media_storage import MultipartFileConsumer

        # note that currently the json_object is just {}, this will change when linked media
        # is implemented
        multipart_consumer = MultipartFileConsumer(
            clock,
            request,
            media_info.media_type,
            {},
            disposition,
            media_info.media_length,
        )

        logger.debug("Responding to media request with responder %s", responder)
        if media_info.media_length is not None:
            content_length = multipart_consumer.content_length()
            assert content_length is not None
            request.setHeader(b"Content-Length", b"%d" % (content_length,))

        request.setHeader(
            b"Content-Type",
            b"multipart/mixed; boundary=%s" % multipart_consumer.boundary,
        )

        try:
            await responder.write_to_consumer(multipart_consumer)
        except Exception as e:
            # The majority of the time this will be due to the client having gone
            # away. Unfortunately, Twisted simply throws a generic exception at us
            # in that case.
            logger.warning("Failed to write to consumer: %s %s", type(e), e)

            # Unregister the producer, if it has one, so Twisted doesn't complain
            if request.producer:
                request.unregisterProducer()

        finish_request(request)
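
# For reference, the consumer above emits a body shaped roughly like this
# (boundary value illustrative; the JSON part is currently always {}):
#
#     --6067d4698f8d40a0a794ea7d7379d53a
#     Content-Type: application/json
#
#     {}
#     --6067d4698f8d40a0a794ea7d7379d53a
#     Content-Type: text/plain
#     Content-Disposition: attachment; filename=test_upload
#
#     <media bytes>
#     --6067d4698f8d40a0a794ea7d7379d53a--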


async def respond_with_responder(
    request: SynapseRequest,
    responder: "Optional[Responder]",
(Some files were not shown because too many files have changed in this diff.)