Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)

Compare commits: dkasak/par...v1.112.0 (132 commits)
Commits in this range:

37f9876ccf, 8b449a8ce6, e8ee784c75, 48c1307911, d225b6b3eb, 1daae43f3a, a9ee832e48, 13a99fba1b, e3a0681ecf, de05a64246, d221512498, ed0face8ad, 73529d3732, 1648337775, dc8ddc6472, d3f9afd8d9, 43c865f7c9, 71d83477cb, 6a01af59e1, f583f1dce4, a574de0062, 3fee32ed6b, 5884f0a956, 79924aebef, 574aa53126, 83894180b2, 429ecb7564, 9e1acea051, 899d33f2ba, df11af14db, d88ba45db9, 14f2b1eb00, 2af729a193, 0de0689ae8, 4c44020838, 4f6194492a, ab62aa09da, fb66e938b2, 5a97bbd895, 606da398fc, 677142b6a9, 342f0c35b7, 5871daf877, 30e14c8510, 3232bc2982, 4ca13ce0dd, 8e229535fa, e0ff850cb7, 22fbc5be54, 1cf3ff6b40, 62d8b0361b, 4d6f7c0fc9, d48061b7e6, 963a60c7e7, 8e7da35402, 028b103ae0, abb1384502, 0ed1c64c83, 1353fb3347, b15e17ce6e, 8cdd2d214e, 3fef535ff2, 62134dcc77, 23eed4f72a, 4721177241, 7879f288df, ccbd619b43, c896030f67, 4d7e53707c, cf69f8d59b, 20de685a4b, 8e9e6f1a0a, 57538eb4d9, 45b35f8eae, 2ec257d608, daeaeb2a7b, 7786ae7e1c, 22aeb78b77, a9d2e40ea4, 0c4f7a3d16, 75b788f49f, 7be03d854b, fa91655805, 0d2b75cf92, ccce858c4a, 99c107920d, 1609855ff8, 8f890447b0, b905ae27ca, 1ce59d7ba0, b3b793786c, 9c8f1a6d41, 5b5280e3e5, 635e3927d2, a1b8897668, 76b9f14c0a, 1eccbfb82f, 2f5a77efae, b11f5c984b, 27756c9fdf, cc5e5893fe, 7c169f4d2c, f75da9cc53, 75c19bf57a, b924a8e1a9, a8dcd686fb, 315b8d2032, 9f47513458, ef7fbdfebd, 9cf0ef9c70, a023538822, f79dbd0f61, c89fea3fd1, 554a92601a, a98cb87bee, 6e8af83193, 805e6c9a8f, 3c61ddbbc9, ae4c236a6d, 930a64b6c1, 7a11c0ac4f, cf711ac03c, 700d2cc4a0, 1e74b50dc6, 7c2d8f1f01, 118b734081, 7a6186b888, 452a59f887, adeedb7b7c, 7c5fb13f7b, f8d57ce656, 13ed84c573
.github/ISSUE_TEMPLATE.md (2 changes, vendored)
@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
 
-**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
+**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (2 changes, vendored)
@@ -7,7 +7,7 @@ body:
 **THIS IS NOT A SUPPORT CHANNEL!**
 **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
 
-If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
 
 This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.
.github/workflows/tests.yml (26 changes, vendored)
@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter
@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
 
+          linting_readme:
+            - 'README.rst'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes
@@ -135,7 +139,7 @@ jobs:
 
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+        run: poetry run ruff check --quiet .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -269,6 +273,20 @@ jobs:
 
       - run: cargo fmt --check
 
+  # This is to detect issues with the rst file, which can otherwise cause issues
+  # when uploading packages to PyPi.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
@@ -284,9 +302,10 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
         with:
          needs: ${{ toJSON(needs) }}
@@ -301,6 +320,7 @@ jobs:
           lint-clippy
           lint-clippy-nightly
           lint-rustfmt
+          lint-readme
 
 
   calculate-test-jobs:
@@ -717,7 +737,7 @@ jobs:
       - linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
        with:
          needs: ${{ toJSON(needs) }}
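The new `lint-readme` job can be reproduced locally with the same two commands the workflow runs:

```console
pip install rstcheck
rstcheck --report-level=WARNING README.rst
```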
CHANGES.md (272 changes)
@@ -1,3 +1,275 @@
# Synapse 1.112.0 (2024-07-30)

This security release updates our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).

Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.

This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.

With that said, despite being a high-severity issue, **we consider it unlikely that Synapse installations will be affected**.
Using HTTP pipelining in this fashion would worsen performance for clients (request-response latencies would increase because users' responses would be artificially blocked behind other users' slow requests). Further, nginx and HAProxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining, and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.

Despite that, we cannot rule out that some installations may exist with this unusual setup, and so we are releasing this security update today.

**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version, because it is only a release candidate at this time.
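For pip-based deployments the full upgrade is therefore a two-step process; a minimal sketch, run inside the virtualenv that hosts Synapse:

```console
# Upgrade Synapse itself first.
pip install -U matrix-synapse

# Twisted must then be pinned explicitly, since 24.7.0rc1 is only a
# release candidate and will not be pulled in automatically.
pip install Twisted==24.7.0rc1
```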
### Internal Changes

- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))


# Synapse 1.112.0rc1 (2024-07-23)

Please note that this release candidate does not include the security dependency update
included in version 1.111.1, as this version was released before 1.111.1.
The same security fix can be found in the full release of 1.112.0.

### Features

- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))

### Bugfixes

- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))

### Improved Documentation

- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))

### Internal Changes

- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))

### Updates to locked dependencies

* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))


# Synapse 1.111.0 (2024-07-16)

No significant changes since 1.111.0rc2.


# Synapse 1.111.0rc2 (2024-07-10)

### Bugfixes

- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420))

### Improved Documentation

- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421))

### Internal Changes

- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422))


# Synapse 1.111.0rc1 (2024-07-09)

### Features

- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320))
- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337))
- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding the [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint (see the example request after this list). ([\#17365](https://github.com/element-hq/synapse/issues/17365))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding the [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid) and [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388))
- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395))
- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400))
- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403))
- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406))
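As a rough sketch of what the new authenticated download endpoint looks like from a client's perspective (the access token, server name, and media ID here are placeholders):

```console
# Fetch media over the authenticated Matrix 1.11 endpoint; this replaces
# unauthenticated /_matrix/media/v3/download requests.
curl --header "Authorization: Bearer $ACCESS_TOKEN" \
     --output media_file \
     "https://matrix.example.com/_matrix/client/v1/media/download/example.com/abcDEF123"
```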
### Bugfixes

- Fix rare race which caused no new to-device messages to be received from a remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398))

### Improved Documentation

- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356))
- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379))
- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399))

### Internal Changes

- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318))
- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367))
- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411))
- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390))
- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410))
- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393))

### Updates to locked dependencies

* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404))
* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382))
* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413))
* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384))
* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414))
* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412))
* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415))
* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381))
* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409))
* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408))
* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380))


# Synapse 1.110.0 (2024-07-03)

No significant changes since 1.110.0rc3.


# Synapse 1.110.0rc3 (2024-07-02)

### Bugfixes

- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))

### Internal Changes

- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))


# Synapse 1.110.0rc2 (2024-06-26)

### Internal Changes

- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))


# Synapse 1.110.0rc1 (2024-06-26)

### Features

- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
- Add a filter for public and empty rooms to the Admin API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
- `register_new_matrix_user` now supports a --password-file flag, which is useful for scripting (see the usage sketch after this list). ([\#17294](https://github.com/element-hq/synapse/issues/17294))
- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database. This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
- Add support for the `via` query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation `/download` endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
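A minimal provisioning sketch combining the two new flags (the paths, username, and server URL are illustrative; `-c`, `-u`, and `--no-admin` are the tool's existing options):

```console
# Idempotently ensure an account exists, reading the password from a file
# instead of prompting; with --exists-ok a pre-existing user is not an error.
register_new_matrix_user -c /etc/synapse/homeserver.yaml \
    -u alice \
    --password-file /run/secrets/alice_password \
    --exists-ok \
    --no-admin \
    http://localhost:8008
```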
### Bugfixes

- Fix searching for users by their exact localpart when their ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
- Fix edge case in `/sync` returning the wrong state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))

### Improved Documentation

- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
- Update header in the README to visually fix the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
- Fix an error in the docs for the `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))

### Internal Changes

- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync`, since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331))
- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring the `parse_integer_from_args` default into alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))

### Updates to locked dependencies

* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))

# Synapse 1.109.0 (2024-06-18)

### Internal Changes
Cargo.lock (28 changes, generated)
@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytes"
-version = "1.6.0"
+version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952"
 
 [[package]]
 name = "cfg-if"
@@ -212,9 +212,9 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 
 [[package]]
 name = "libc"
@@ -234,9 +234,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "memchr"
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "serde"
-version = "1.0.203"
+version = "1.0.204"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
+checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.203"
+version = "1.0.204"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
+checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,9 +505,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.120"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5"
 dependencies = [
  "itoa",
  "ryu",
@@ -597,9 +597,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 
 [[package]]
 name = "ulid"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
 dependencies = [
  "getrandom",
  "rand",
README.rst (30 changes)
@@ -1,20 +1,20 @@
-.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
+.. image:: ./docs/element_logo_white_bg.svg
+  :height: 60px
 
-===========================================================================================================
-Element Synapse - Matrix homeserver implementation |support| |development| |documentation| |license| |pypi| |python|
-===========================================================================================================
+**Element Synapse - Matrix homeserver implementation**
 
-Synapse is an open source `Matrix <https://matrix.org>`_ homeserver
+|support| |development| |documentation| |license| |pypi| |python|
+
+Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
 implementation, written and maintained by `Element <https://element.io>`_.
-`Matrix <https://github.com/matrix-org>`_ is the open standard for
+`Matrix <https://github.com/matrix-org>`__ is the open standard for
 secure and interoperable real time communications. You can directly run
 and manage the source code in this repository, available under an AGPL
 license. There is no support provided from Element unless you have a
 subscription.
 
 Subscription alternative
-------------------------
+========================
 
 Alternatively, for those that need an enterprise-ready solution, Element
 Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
@@ -119,7 +119,7 @@ impact to other applications will be minimal.
 
 🧪 Testing a new installation
-============================
+=============================
 
 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.
@@ -173,19 +173,19 @@ As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.
 
 🎯 Troubleshooting and support
-=============================
+==============================
 
 🚀 Professional support
-----------------------
+-----------------------
 
 Enterprise quality support for Synapse including SLAs is available as part of an
-`Element Server Suite (ESS) <https://element.io/pricing>` subscription.
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
 
-If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`
-and access the `knowledge base <https://ems-docs.element.io>`.
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.
 
 🤝 Community support
--------------------
+--------------------
 
 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see
@@ -202,7 +202,7 @@ issues for support requests, only for bug reports and feature requests.
 .. _docs: docs
 
 🪪 Identity Servers
-==================
+===================
 
 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
changelog.d entries (deleted; one file removed per hunk)
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1 +0,0 @@
-Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes.
@@ -1 +0,0 @@
-Fix searching for users with their exact localpart whose ID includes a hyphen.
@@ -1 +0,0 @@
-Improve ratelimiting in Synapse (#17256).
@@ -1 +0,0 @@
-Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation.
@@ -1 +0,0 @@
-Add debug logging for when room keys are uploaded, including whether they are replacing other room keys.
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
@@ -1 +0,0 @@
-Handle OTK uploads off master.
@@ -1 +0,0 @@
-Fix wrong retention policy being used when filtering events.
@@ -1 +0,0 @@
-Don't try and resync devices for remote users whose servers are marked as down.
@@ -1 +0,0 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers.
@@ -1 +0,0 @@
-Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api).
@@ -1 +0,0 @@
-Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1 +0,0 @@
-Re-organize Pydantic models and types used in handlers.
@@ -1 +0,0 @@
-Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1 +0,0 @@
-Include user membership in events served to clients, per MSC4115.
@@ -1 +0,0 @@
-Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error.
@@ -1 +0,0 @@
-Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.
@@ -1 +0,0 @@
-Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --password-file flag, which
-is useful for scripting.
@@ -1 +0,0 @@
-Fix edge case in `/sync` returning the wrong state when using sharded event persisters.
@@ -1 +0,0 @@
-Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.
@@ -1 +0,0 @@
-Bump `mypy` from 1.8.0 to 1.9.0.
@@ -1 +0,0 @@
-Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.
@@ -1 +0,0 @@
-Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
@@ -1,2 +0,0 @@
-`register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database.
-This is useful for scripts that bootstrap user accounts with initial passwords.
@@ -1 +0,0 @@
-Add missing quotes for example for `exclude_rooms_from_sync`.
@@ -1 +0,0 @@
-Add support for via query parameter from MSC4156.
@@ -1 +0,0 @@
-Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering.
@@ -1 +0,0 @@
-This is a changelog so tests will run.
@@ -1 +0,0 @@
-Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC.
debian/changelog (53 changes, vendored)
@@ -1,8 +1,57 @@
-matrix-synapse-py3 (1.109.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.112.0) stable; urgency=medium
+
+  * New Synapse release 1.112.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 17:15:48 +0100
+
+matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.112.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Jul 2024 08:58:55 -0600
+
+matrix-synapse-py3 (1.111.0) stable; urgency=medium
+
+  * New Synapse release 1.111.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Jul 2024 12:42:46 +0200
+
+matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.111.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 10 Jul 2024 08:46:54 +0000
+
+matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.111.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Jul 2024 09:49:25 +0000
+
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
+matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 18:14:48 +0200
+
+matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
 
   * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
+  * New Synapse release 1.110.0rc1.
 
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 13:29:36 +0100
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 14:07:56 +0200
 
 matrix-synapse-py3 (1.109.0) stable; urgency=medium
@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
 ###
 # We hardcode the use of Debian bookworm here because this could change upstream
 # and other Dockerfiles used for testing are expecting bookworm.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements
 
 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder
 
 # install the OS build deps
 RUN \
@@ -24,7 +24,7 @@ ARG distro=""
|
||||
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
|
||||
# it's not obviously easier to use that than to build our own.)
|
||||
|
||||
FROM docker.io/library/${distro} as builder
|
||||
FROM docker.io/library/${distro} AS builder
|
||||
|
||||
RUN apt-get update -qq -o Acquire::Languages=none
|
||||
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
@@ -73,6 +73,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
curl \
|
||||
debhelper \
|
||||
devscripts \
|
||||
# Required for building cffi from source.
|
||||
libffi-dev \
|
||||
libsystemd-dev \
|
||||
lsb-release \
|
||||
pkg-config \
|
||||
|
||||
@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
|
||||
cp -aT /synapse/source /synapse/build
|
||||
cd /synapse/build
|
||||
|
||||
# Delete any existing `.so` files to ensure a clean build.
|
||||
rm -f /synapse/build/synapse/*.so
|
||||
|
||||
# if this is a prerelease, set the Section accordingly.
|
||||
#
|
||||
# When the package is later added to the package repo, reprepro will use the
|
||||
|
||||
@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
            "^/_matrix/media/",
            "^/_synapse/admin/v1/purge_media_cache$",
@@ -125,6 +125,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_synapse/admin/v1/user/.*/media.*$",
            "^/_synapse/admin/v1/media/.*$",
            "^/_synapse/admin/v1/quarantine_media/.*$",
+           "^/_matrix/client/v1/media/.*$",
+           "^/_matrix/federation/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
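For deployments running a dedicated media worker, the change above implies the worker's listener must expose the `client` resource alongside `media`, and the new `/_matrix/client/v1/media/` and `/_matrix/federation/v1/media/` paths must be routed to it. A minimal sketch (worker name, port, and file path are illustrative):

```console
# Sketch of a media worker listener config.
$ cat /etc/synapse/workers/media_repository1.yaml
worker_app: synapse.app.generic_worker
worker_name: media_repository1

worker_listeners:
  - type: http
    port: 8085
    resources:
      # "client" is now needed in addition to "media" for the
      # authenticated /_matrix/client/v1/media/* endpoints.
      - names: [media, client]
```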
@@ -1,21 +1,17 @@
 # Experimental Features API
 
 This API allows a server administrator to enable or disable some experimental features on a per-user
-basis. The currently supported features are:
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
-presence state enabled
-- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
-for another client
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys.
-
+basis. The currently supported features are:
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
 
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
 
 ## Enabling/Disabling Features
 
-This API allows a server administrator to enable experimental features for a given user. The request must
+This API allows a server administrator to enable experimental features for a given user. The request must
 provide a body containing the user id and listing the features to enable/disable in the following format:
 ```json
 {
@@ -35,7 +31,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
 ```
 
 ## Listing Enabled Features
-
+
 To list which features are enabled/disabled for a given user send a request to the following API:
 
 ```
@@ -52,4 +48,4 @@ user like so:
   "msc3967": false
  }
 }
-```
+```
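A sketch of driving this API with `curl` (the admin token, homeserver address, and user ID are placeholders, and the request body assumes the `features` map format shown in the listing example):

```console
# Enable MSC3881 and disable MSC3575 for one user.
curl -X PUT \
  --header "Authorization: Bearer $ADMIN_TOKEN" \
  --data '{"features": {"msc3881": true, "msc3575": false}}' \
  "http://localhost:8008/_synapse/admin/v1/experimental_features/@alice:example.com"

# List which features are currently enabled/disabled for that user.
curl --header "Authorization: Bearer $ADMIN_TOKEN" \
  "http://localhost:8008/_synapse/admin/v1/experimental_features/@alice:example.com"
```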
@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.
 
-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.
 
 ### How do I know what to call the changelog file before I create the PR?
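As an illustration of the rule above (the PR numbers and wording are hypothetical, reusing the docs' own Florb example), two PRs contributing to one feature would carry entries that match in both content and file extension:

```console
# Same extension (.feature) and identical content, so towncrier
# merges these into a single changelog entry at release time.
$ cat changelog.d/1234.feature changelog.d/5678.feature
Validate the security levels of Florbs received via the `/federation/florb` endpoint.
Validate the security levels of Florbs received via the `/federation/florb` endpoint.
```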
docs/element_logo_white_bg.svg (new file, 94 lines, 7.5 KiB)
@@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="41.440346mm"
   height="10.383124mm"
   viewBox="0 0 41.440346 10.383125"
   version="1.1"
   id="svg1"
   xml:space="preserve"
   sodipodi:docname="element_logo_white_bg.svg"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
     id="namedview1"
     pagecolor="#ffffff"
     bordercolor="#000000"
     borderopacity="0.25"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:document-units="mm"
     showgrid="false"
     inkscape:export-bgcolor="#ffffffff" /><defs
     id="defs1" /><g
     id="layer1"
     transform="translate(-84.803844,-143.2075)"
     inkscape:export-filename="element_logo_white_bg.svg"
     inkscape:export-xdpi="96"
     inkscape:export-ydpi="96"><g
       style="fill:none"
       id="g1"
       transform="matrix(0.26458333,0,0,0.26458333,85.841658,144.26667)"><rect
         style="display:inline;fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:1.31041;stroke-dasharray:none;stroke-opacity:1"
         id="rect20"
         width="155.31451"
         height="37.932892"
         x="-3.2672384"
         y="-3.3479743"
         rx="3.3718522"
         ry="3.7915266"
         transform="translate(-2.1259843e-6)"
         inkscape:label="rect20"
         inkscape:export-filename="rect20.svg"
         inkscape:export-xdpi="96"
         inkscape:export-ydpi="96" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="M 16,32 C 24.8366,32 32,24.8366 32,16 32,7.16344 24.8366,0 16,0 7.16344,0 0,7.16344 0,16 0,24.8366 7.16344,32 16,32 Z"
         fill="#0dbd8b"
         id="path1" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 13.0756,7.455 c 0,-0.64584 0.5247,-1.1694 1.1719,-1.1694 4.3864,0 7.9423,3.54853 7.9423,7.9259 0,0.6458 -0.5246,1.1694 -1.1718,1.1694 -0.6472,0 -1.1719,-0.5236 -1.1719,-1.1694 0,-3.0857 -2.5066,-5.58711 -5.5986,-5.58711 -0.6472,0 -1.1719,-0.52355 -1.1719,-1.16939 z"
         fill="#ffffff"
         id="path2" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 24.5424,13.042 c 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,4.3773 -3.5559,7.9258 -7.9424,7.9258 -0.6472,0 -1.1718,-0.5235 -1.1718,-1.1693 0,-0.6459 0.5246,-1.1694 1.1718,-1.1694 3.0921,0 5.5987,-2.5015 5.5987,-5.5871 0,-0.6459 0.5247,-1.1694 1.1718,-1.1694 z"
         fill="#ffffff"
         id="path3" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 18.9446,24.5446 c 0,0.6459 -0.5247,1.1694 -1.1718,1.1694 -4.3865,0 -7.94239,-3.5485 -7.94239,-7.9258 0,-0.6459 0.52469,-1.1694 1.17179,-1.1694 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,3.0856 2.5066,5.587 5.5987,5.587 0.6471,0 1.1718,0.5236 1.1718,1.1694 z"
         fill="#ffffff"
         id="path4" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 7.45823,18.9576 c -0.64718,0 -1.17183,-0.5235 -1.17183,-1.1694 0,-4.3773 3.55591,-7.92581 7.9423,-7.92581 0.6472,0 1.1719,0.52351 1.1719,1.16941 0,0.6458 -0.5247,1.1694 -1.1719,1.1694 -3.092,0 -5.59864,2.5014 -5.59864,5.587 0,0.6459 -0.52465,1.1694 -1.17183,1.1694 z"
         fill="#ffffff"
         id="path5" /><path
         d="M 56.2856,18.1428 H 44.9998 c 0.1334,1.181 0.5619,2.1238 1.2858,2.8286 0.7238,0.6857 1.6761,1.0286 2.8571,1.0286 0.7809,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3428,-1.5428 h 3.4286 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1714,0 3.9238,0.73333 5.2572,2.20003 1.3523,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0667,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6858,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
         fill="#000000"
         id="path6" /><path
         d="M 58.6539,20.1428 V 3.14282 h 3.4 V 20.2 c 0,0.7619 0.419,1.1428 1.2571,1.1428 l 0.6,-0.0285 v 3.2285 c -0.3238,0.0572 -0.6667,0.0857 -1.0286,0.0857 -1.4666,0 -2.5428,-0.3714 -3.2285,-1.1142 -0.6667,-0.7429 -1,-1.8667 -1,-3.3715 z"
         fill="#000000"
         id="path7" /><path
         d="M 79.7454,18.1428 H 68.4597 c 0.1333,1.181 0.5619,2.1238 1.2857,2.8286 0.7238,0.6857 1.6762,1.0286 2.8571,1.0286 0.781,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3429,-1.5428 h 3.4285 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1715,0 3.9238,0.73333 5.2572,2.20003 1.3524,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0666,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6857,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
         fill="#000000"
         id="path8" /><path
         d="m 95.0851,16.0571 v 8.5143 h -3.4 v -8.8857 c 0,-2.2476 -0.9333,-3.3714 -2.8,-3.3714 -1.0095,0 -1.819,0.3238 -2.4286,0.9714 -0.5904,0.6476 -0.8857,1.5333 -0.8857,2.6571 v 8.6286 h -3.4 V 9.74282 h 3.1429 v 1.97148 c 0.3619,-0.6667 0.9143,-1.2191 1.6571,-1.6572 0.7429,-0.43809 1.6667,-0.65713 2.7714,-0.65713 2.0572,0 3.5429,0.78093 4.4572,2.34283 1.2571,-1.5619 2.9333,-2.34283 5.0286,-2.34283 1.733,0 3.067,0.54285 4,1.62853 0.933,1.0667 1.4,2.4762 1.4,4.2286 v 9.3143 h -3.4 v -8.8857 c 0,-2.2476 -0.933,-3.3714 -2.8,-3.3714 -1.0286,0 -1.8477,0.3333 -2.4572,1 -0.5905,0.6476 -0.8857,1.5619 -0.8857,2.7428 z"
         fill="#000000"
         id="path9" /><path
         d="m 121.537,18.1428 h -11.286 c 0.133,1.181 0.562,2.1238 1.286,2.8286 0.723,0.6857 1.676,1.0286 2.857,1.0286 0.781,0 1.486,-0.1905 2.114,-0.5715 0.629,-0.3809 1.076,-0.8952 1.343,-1.5428 h 3.429 c -0.458,1.5047 -1.315,2.7238 -2.572,3.6571 -1.238,0.9143 -2.705,1.3715 -4.4,1.3715 -2.209,0 -4,-0.7334 -5.371,-2.2 -1.353,-1.4667 -2.029,-3.3239 -2.029,-5.5715 0,-2.1905 0.686,-4.0285 2.057,-5.5143 1.372,-1.4857 3.143,-2.22853 5.315,-2.22853 2.171,0 3.923,0.73333 5.257,2.20003 1.352,1.4476 2.028,3.2762 2.028,5.4857 z m -7.257,-5.9714 c -1.067,0 -1.953,0.3143 -2.658,0.9429 -0.704,0.6285 -1.142,1.4666 -1.314,2.5142 h 7.886 c -0.153,-1.0476 -0.572,-1.8857 -1.257,-2.5142 -0.686,-0.6286 -1.572,-0.9429 -2.657,-0.9429 z"
         fill="#000000"
         id="path10" /><path
         d="m 127.105,9.74282 v 1.97148 c 0.343,-0.6477 0.905,-1.1905 1.686,-1.6286 0.8,-0.45716 1.762,-0.68573 2.885,-0.68573 1.753,0 3.105,0.53333 4.058,1.60003 0.971,1.0666 1.457,2.4857 1.457,4.2571 v 9.3143 h -3.4 v -8.8857 c 0,-1.0476 -0.248,-1.8667 -0.743,-2.4572 -0.476,-0.6095 -1.21,-0.9142 -2.2,-0.9142 -1.086,0 -1.943,0.3238 -2.572,0.9714 -0.609,0.6476 -0.914,1.5428 -0.914,2.6857 v 8.6 h -3.4 V 9.74282 Z"
         fill="#000000"
         id="path11" /><path
         d="m 147.12,21.5428 v 2.9429 c -0.419,0.1143 -1.009,0.1714 -1.771,0.1714 -2.895,0 -4.343,-1.4571 -4.343,-4.3714 v -7.8286 h -2.257 V 9.74282 h 2.257 V 5.88568 h 3.4 v 3.85714 h 2.772 v 2.71428 h -2.772 v 7.4857 c 0,1.1619 0.552,1.7429 1.657,1.7429 z"
         fill="#000000"
         id="path12" /></g></g></svg>
@@ -67,7 +67,7 @@ in Synapse can be deactivated.

**NOTE**: This has an impact on security and is for testing purposes only!

To deactivate the certificate validation, the following setting must be added to
-your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).
+your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).

```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true
@@ -309,7 +309,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
    libwebp-devel libxml2-devel libxslt-devel libpq-devel \
    python3-virtualenv libffi-devel openssl-devel python3-devel \
    libicu-devel
-sudo dnf groupinstall "Development Tools"
+sudo dnf group install "Development Tools"
```

##### Red Hat Enterprise Linux / Rocky Linux

*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*

It's recommended to use the latest Python versions.

RHEL 8 in particular ships with Python 3.6 by default, which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9, which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and are available in the distributions' official repositories, so it's recommended to use them.

Python 3.11 and 3.12 are available for both RHEL 8 and 9.

These commands should be run as the root user.

RHEL 8
```bash
# Enable PowerTools repository
dnf config-manager --set-enabled powertools
```
RHEL 9
```bash
# Enable CodeReady Linux Builder repository
crb enable
```

Install a new version of Python. You only need one of these:
```bash
# Python 3.11
dnf install python3.11 python3.11-devel
```
```bash
# Python 3.12
dnf install python3.12 python3.12-devel
```
Finally, install the common prerequisites:
```bash
dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
dnf group install "Development Tools"
```
###### Using the venv module instead of the virtualenv command

It's recommended to use the Python `venv` module directly rather than the `virtualenv` command.
* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.

Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI.

```bash
mkdir -p ~/synapse
# To use Python 3.11, simply use the command "python3.11" instead.
python3.12 -m venv ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```

##### macOS
@@ -117,6 +117,20 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.111.0

## New worker endpoints for authenticated client and federation media

[Media repository workers](./workers.md#synapseappmedia_repository) handling
Media APIs can now handle the following endpoint patterns:

```
^/_matrix/client/v1/media/.*$
^/_matrix/federation/v1/media/.*$
```

Please update your reverse proxy configuration.

# Upgrading to v1.106.0

## Minimum supported Rust version
@@ -246,6 +246,7 @@ Example configuration:
```yaml
presence:
  enabled: false
  include_offline_users_on_sync: false
```

`enabled` can also be set to a special value of "untracked" which ignores updates
@@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the

*The "untracked" option was added in Synapse 1.96.0.*

When clients perform an initial or `full_state` sync, presence results for offline users are
not included by default. Setting `include_offline_users_on_sync` to `true` will always include
offline users in the results. Defaults to false.

---
### `require_auth_for_profile_requests`

@@ -1759,8 +1764,9 @@ rc_3pid_validation:
### `rc_invites`

This option sets ratelimiting on how often invites can be sent in a room or to a
-specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
-`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
+defaults to `per_second: 0.3`, `burst_count: 10`.

Client requests that invite user(s) when [creating a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
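To make the three knobs concrete, here is a hedged sketch of overriding them in `homeserver.yaml` (the values are illustrative, not the defaults):

```yaml
rc_invites:
  per_room:
    per_second: 0.5
    burst_count: 5
  per_user:
    per_second: 0.004
    burst_count: 3
  per_issuer:
    per_second: 0.5
    burst_count: 5
```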
@@ -1862,6 +1868,18 @@ federation_rr_transactions_per_room_per_second: 40
## Media Store
Config options related to Synapse's media store.

---
### `enable_authenticated_media`

When set to true, all subsequent media uploads will be marked as authenticated, and will not be
available over the legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download`
and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints
will result in a 404. All media, including authenticated media, will be available over the
authenticated media endpoints `_matrix/client/v1/media/download` and
`_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will
still be available over the legacy endpoints. Note that if the setting is switched back to false
after enabling, media marked as authenticated will be available over the legacy endpoints.
Defaults to false, but this will change to true in a future Synapse release.

Example configuration:
```yaml
enable_authenticated_media: true
```
---
### `enable_media_repo`
@@ -1975,9 +1993,10 @@ This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.

-This will have no effect on media originating from the local server.
-This only affects media downloaded from other Matrix servers, to
-block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+This will have no effect on media originating from the local server. This only
+affects media downloaded from other Matrix servers; to control URL previews see
+[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+[`url_preview_url_blacklist`](#url_preview_url_blacklist).

Defaults to an empty list (nothing blocked).
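For example, a hedged configuration blocking media from two remote homeservers (hypothetical names):

```yaml
prevent_media_downloads_from:
  - evil.server.org
  - spam.server.org
```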
@@ -2129,12 +2148,14 @@ url_preview_ip_range_whitelist:
---
### `url_preview_url_blacklist`

-Optional list of URL matches that the URL preview spider is
-denied from accessing. You should use `url_preview_ip_range_blacklist`
-in preference to this, otherwise someone could define a public DNS
-entry that points to a private IP address and circumvent the blacklist.
-This is more useful if you know there is an entire shape of URL that
-you know that will never want synapse to try to spider.
+Optional list of URL matches that the URL preview spider is denied from
+accessing. This is a usability feature, not a security one. You should use
+`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
+define a public DNS entry that points to a private IP address and circumvent
+the blacklist. Applications that perform redirects or serve different content
+when detecting that Synapse is accessing them can also bypass the blacklist.
+This is more useful if you know there is an entire shape of URL that you know
+that you do not want Synapse to preview.

Each list entry is a dictionary of url component attributes as returned
by urlparse.urlsplit as applied to the absolute form of the URL. See
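For illustration, a hedged sketch of such entries (the attribute names follow `urlsplit`; the patterns themselves are hypothetical):

```yaml
url_preview_url_blacklist:
  # deny any URL with a username in its URI
  - username: '*'
  # deny all *.example.com URLs
  - netloc: 'example.com'
  - netloc: '*.example.com'
```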
@@ -2718,7 +2739,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
---
-### `refresh_access_token_lifetime`
+### `refreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.
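Example configuration (a hedged sketch; the value is illustrative):

```yaml
refreshable_access_token_lifetime: 24h
```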
@@ -3806,7 +3827,8 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
  empty responses are returned to all queries. Defaults to true.
-* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
+* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
+  If set to true, will return all users known to the homeserver matching the search query.
+  If false, search results will only contain users
+  visible in public rooms and users sharing a room with the requester.
+  Defaults to false.
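Example configuration (a hedged sketch):

```yaml
user_directory:
  enabled: true
  search_all_users: false
```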
@@ -4129,6 +4151,38 @@ default_power_level_content_override:
  trusted_private_chat: null
  public_chat: null
```

The default power levels for each preset are:
```yaml
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
```

So a complete example where the default power-levels for a preset are maintained
but the power level for a new key is set is:
```yaml
default_power_level_content_override:
  private_chat:
    events:
      "com.example.foo": 0
      "m.room.name": 50
      "m.room.power_levels": 100
      "m.room.history_visibility": 100
      "m.room.canonical_alias": 50
      "m.room.avatar": 50
      "m.room.tombstone": 100
      "m.room.server_acl": 100
      "m.room.encryption": 100
  trusted_private_chat: null
  public_chat: null
```

---
### `forget_rooms_on_leave`
@@ -62,6 +62,6 @@ following documentation:

## Reporting a security vulnerability

-If you've found a security issue in Synapse or any other Matrix.org Foundation
-project, please report it to us in accordance with our [Security Disclosure
-Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
+If you've found a security issue in Synapse or any other Element project,
+please report it to us in accordance with our [Security Disclosure
+Policy](https://element.io/security/security-disclosure-policy). Thank you!
@@ -739,6 +739,8 @@ An example for a federation sender instance:
Handles the media repository. It can handle all endpoints starting with:

    /_matrix/media/
    /_matrix/client/v1/media/
    /_matrix/federation/v1/media/

... and the following regular expressions matching media-specific administration APIs:
mypy.ini (3 lines changed)
@@ -96,3 +96,6 @@ ignore_missing_imports = True
# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True

[mypy-multipart.*]
ignore_missing_imports = True
poetry.lock (generated; 1107 lines changed): file diff suppressed because it is too large.
@@ -43,6 +43,7 @@ target-version = ['py38', 'py39', 'py310', 'py311']
[tool.ruff]
line-length = 88

[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
# for error codes. The ones we ignore are:
# E501: Line too long (black enforces this for us)
@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
-version = "1.109.0"
+version = "1.112.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -224,6 +225,8 @@ pydantic = ">=1.7.4, <3"
# needed.
setuptools_rust = ">=1.3"

# This is used for parsing multipart responses
python-multipart = ">=0.0.9"

# Optional Dependencies
# ---------------------
@@ -319,7 +322,7 @@ all = [
# This helps prevents merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
-ruff = "0.3.7"
+ruff = "0.5.4"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"

# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
-ruff --quiet --fix "${files[@]}"
+ruff check --quiet --fix "${files[@]}"

# Catch any common programming mistakes in Rust code.
#
@@ -70,6 +70,7 @@ def cli() -> None:
        pip install -e .[dev]

    - A checkout of the sytest repository at ../sytest
    - A checkout of the complement repository at ../complement

    Then to use:

@@ -112,10 +113,12 @@ def _prepare() -> None:
    # Make sure we're in a git repo.
    synapse_repo = get_repo_and_check_clean_checkout()
    sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")

    click.secho("Updating Synapse and Sytest git repos...")
    synapse_repo.remote().fetch()
    sytest_repo.remote().fetch()
    complement_repo.remote().fetch()

    # Get the current version and AST from root Synapse module.
    current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
        "Which branch should the release be based on?", default=default
    )

-    for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
+    for repo_name, repo in {
+        "synapse": synapse_repo,
+        "sytest": sytest_repo,
+        "complement": complement_repo,
+    }.items():
+        # Special case for Complement: `develop` maps to `main`
+        if repo_name == "complement" and branch_name == "develop":
+            branch_name = "main"

        base_branch = find_ref(repo, branch_name)
        if not base_branch:
            print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
    if click.confirm("Push new SyTest branch?", default=True):
        sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

    # Same for Complement
    if click.confirm("Push new Complement branch?", default=True):
        complement_repo.git.push(
            "-u", complement_repo.remote().name, release_branch_name
        )

    # Switch to the release branch and ensure it's up to date.
    synapse_repo.git.checkout(release_branch_name)
    update_branch(synapse_repo)
@@ -630,6 +647,9 @@ def _merge_back() -> None:
    else:
        # Full release
        sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
        complement_repo = get_repo_and_check_clean_checkout(
            "../complement", "complement"
        )

    if click.confirm(f"Merge {branch_name} → master?", default=True):
        _merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +663,9 @@ def _merge_back() -> None:
    if click.confirm("On SyTest, merge master → develop?", default=True):
        _merge_into(sytest_repo, "master", "develop")

    if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
        _merge_into(complement_repo, branch_name, "main")


@cli.command()
def announce() -> None:
@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")


class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
        super().__init__(config.server.server_name, config=config)
@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
    "e2e_room_keys": ["is_verified"],
    "event_edges": ["is_state"],
    "events": ["processed", "outlier", "contains_url"],
-    "local_media_repository": ["safe_from_quarantine"],
+    "local_media_repository": ["safe_from_quarantine", "authenticated"],
+    "per_user_experimental_features": ["enabled"],
    "presence_list": ["accepted"],
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
    "pushers": ["enabled"],
    "redactions": ["have_censored"],
+    "remote_media_cache": ["authenticated"],
    "room_stats_state": ["is_federatable"],
    "rooms": ["is_public", "has_auth_chain_index"],
    "users": ["shadow_banned", "approved", "locked", "suspended"],
    "un_partial_stated_event_stream": ["rejection_status_changed"],
    "users_who_share_rooms": ["share_private"],
-    "per_user_experimental_features": ["enabled"],
}
@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore [assignment]
+    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig):
        super().__init__(
@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
-from typing import Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple

from typing_extensions import Protocol

@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"

@@ -87,6 +90,19 @@ class Auth(Protocol):
            AuthError if access is denied for the user in the access token
        """

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        """Like `get_user_by_req`, except also checks if the user has access to
        the experimental feature. If they don't, returns a 404 unrecognized
        request.
        """

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ) -> None:
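To make the contract concrete, a hedged sketch of a caller (hypothetical servlet code, not from this diff) gating an endpoint on an experimental feature:

```python
# Hypothetical handler using the new Auth method. `self.auth` is an Auth
# implementation; ExperimentalFeature is the admin-API enum imported above,
# and the specific member used here is illustrative.
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
    requester = await self.auth.get_user_by_req_experimental_feature(
        request, ExperimentalFeature.MSC3881
    )
    # Users without the feature never reach this point: they receive a 404
    # that is indistinguishable from the endpoint not existing at all.
    return 200, {"user_id": requester.user.to_string()}
```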
@@ -28,6 +28,7 @@ from synapse.api.errors import (
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
from .base import BaseAuth

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                parent_span.set_tag("appservice_id", requester.app_service.id)
            return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
@@ -40,6 +40,7 @@ from synapse.api.errors import (
    OAuthInsufficientScopeError,
    StoreError,
    SynapseError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
@@ -48,6 +49,7 @@ from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -143,6 +145,18 @@ class MSC3861DelegatedAuth(BaseAuth):
        # metadata.validate_introspection_endpoint()
        return metadata

    async def _introspection_endpoint(self) -> str:
        """
        Returns the introspection endpoint of the issuer.

        It uses the config option if set; otherwise it will use OIDC discovery to get it.
        """
        if self._config.introspection_endpoint is not None:
            return self._config.introspection_endpoint

        metadata = await self._load_metadata()
        return metadata.get("introspection_endpoint")

    async def _introspect_token(self, token: str) -> IntrospectionToken:
        """
        Send a token to the introspection endpoint and return the introspection response
@@ -159,8 +173,7 @@ class MSC3861DelegatedAuth(BaseAuth):
        Returns:
            The introspection response
        """
-        metadata = await self._issuer_metadata.get()
-        introspection_endpoint = metadata.get("introspection_endpoint")
+        introspection_endpoint = await self._introspection_endpoint()
        raw_headers: Dict[str, str] = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": str(self._http_client.user_agent, "utf-8"),
@@ -245,6 +258,32 @@ class MSC3861DelegatedAuth(BaseAuth):

        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    async def get_user_by_access_token(
        self,
        token: str,
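For context, a hedged example of how the new option surfaces in `homeserver.yaml` (hypothetical URLs; the other `msc3861` fields shown are pre-existing):

```yaml
experimental_features:
  msc3861:
    enabled: true
    issuer: https://auth.example.com/
    # New in this release: bypass OIDC discovery and use this URL directly.
    introspection_endpoint: https://auth.example.com/oauth2/introspect
```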
@@ -50,7 +50,7 @@ class Membership:
    KNOCK: Final = "knock"
    LEAVE: Final = "leave"
    BAN: Final = "ban"
-    LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
+    LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN))


class PresenceState:
@@ -128,9 +128,13 @@ class EventTypes:
    SpaceParent: Final = "m.space.parent"

    Reaction: Final = "m.reaction"
    Sticker: Final = "m.sticker"
    LiveLocationShareStart: Final = "m.beacon_info"

    CallInvite: Final = "m.call.invite"

    PollStart: Final = "m.poll.start"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"
@@ -130,7 +130,8 @@ class Ratelimiter:
                Overrides the value set during instantiation if set.
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
-            update: Whether to count this check as performing the action
+            update: Whether to count this check as performing the action. If the action
+                cannot be performed, the user's action count is not incremented at all.
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
@@ -235,9 +236,8 @@ class Ratelimiter:
            requester: The requester that is doing the action, if any.
            key: An arbitrary key used to classify an action. Defaults to the
                requester's user ID.
-            n_actions: The number of times the user wants to do this action. If the user
-                cannot do all of the actions, the user's action count is not incremented
-                at all.
+            n_actions: The number of times the user performed the action. May be negative
+                to "refund" the rate limit.
            _time_now_s: The current time. Optional, defaults to the current time according
                to self.clock. Only used by tests.
        """
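A hedged usage sketch of the semantics described in these docstrings (hypothetical caller code, not from this diff; it assumes `can_do_action` and `record_action` are both awaitable as used elsewhere in this class):

```python
async def do_batched_action(limiter, requester, work) -> bool:
    # Charge three actions up front: if the budget is insufficient, nothing
    # is counted at all, so the caller can retry later without penalty.
    allowed, _time_allowed = await limiter.can_do_action(requester, n_actions=3)
    if not allowed:
        return False
    try:
        await work()
    except Exception:
        # A negative n_actions "refunds" the charge recorded above.
        await limiter.record_action(requester, n_actions=-3)
        raise
    return True
```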
@@ -110,7 +110,7 @@ class AdminCmdStore(


class AdminCmdServer(HomeServer):
-    DATASTORE_CLASS = AdminCmdStore  # type: ignore
+    DATASTORE_CLASS = AdminCmdStore


async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
@@ -74,6 +74,9 @@ from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.experimental_features import (
    ExperimentalFeaturesStore,
)
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
@@ -155,6 +158,7 @@ class GenericWorkerStore(
    LockStore,
    SessionStore,
    TaskSchedulerWorkerStore,
    ExperimentalFeaturesStore,
):
    # Properties that multiple storage classes define. Tell mypy what the
    # expected type is.
@@ -163,7 +167,7 @@ class GenericWorkerStore(


class GenericWorkerServer(HomeServer):
-    DATASTORE_CLASS = GenericWorkerStore  # type: ignore
+    DATASTORE_CLASS = GenericWorkerStore

    def _listen_http(self, listener_config: ListenerConfig) -> None:
        assert listener_config.http_options is not None
@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:


class SynapseHomeServer(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore

    def _listener_http(
        self,
@@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer):
            )

        if name in ["media", "federation", "client"]:
-            if self.config.server.enable_media_repo:
+            if self.config.media.can_load_media_repo:
                media_repo = self.get_media_repository_resource()
                resources.update(
                    {
@@ -140,6 +140,12 @@ class MSC3861:
        ("experimental", "msc3861", "client_auth_method"),
    )

    introspection_endpoint: Optional[str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)),
    )
    """The URL of the introspection endpoint used to validate access tokens."""

    account_management_url: Optional[str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)),
@@ -433,8 +439,8 @@ class ExperimentalConfig(Config):
            ("experimental", "msc4108_delegation_endpoint"),
        )

-        self.msc3916_authenticated_media_enabled = experimental.get(
-            "msc3916_authenticated_media_enabled", False
+        self.msc3823_account_suspension = experimental.get(
+            "msc3823_account_suspension", False
        )

        # MSC4151: Report room API (Client-Server API)
@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
        # Only enable the media repo if either the media repo is enabled or the
        # current worker app is the media repo.
        if (
-            self.root.server.enable_media_repo is False
+            config.get("enable_media_repo", True) is False
            and config.get("worker_app") != "synapse.app.media_repository"
        ):
            self.can_load_media_repo = False
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
                remote_media_lifetime
            )

        self.enable_authenticated_media = config.get(
            "enable_authenticated_media", False
        )

    def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
        assert data_dir_path is not None
        media_store = os.path.join(data_dir_path, "media_store")
@@ -384,6 +384,11 @@ class ServerConfig(Config):
        # Whether to internally track presence, requires that presence is enabled,
        self.track_presence = self.presence_enabled and presence_enabled != "untracked"

        # Determines if presence results for offline users are included on initial/full sync
        self.presence_include_offline_users_on_sync = presence_config.get(
            "include_offline_users_on_sync", False
        )

        # Custom presence router module
        # This is the legacy way of configuring it (the config should now be put in the modules section)
        self.presence_router_module_class = None
@@ -395,12 +400,6 @@ class ServerConfig(Config):
            self.presence_router_config,
        ) = load_module(presence_router_config, ("presence", "presence_router"))

-        # whether to enable the media repository endpoints. This should be set
-        # to false if the media repository is running as a separate endpoint;
-        # doing so ensures that we will not run cache cleanup jobs on the
-        # master, potentially causing inconsistency.
-        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
@@ -836,3 +836,21 @@ def maybe_upsert_event_field(
        del container[key]

    return upsert_okay


def strip_event(event: EventBase) -> JsonDict:
    """
    Used for "stripped state" events which provide a simplified view of the state of a
    room intended to help a potential joiner identify the room (relevant when the user
    is invited or knocked).

    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
    properties present.
    """

    return {
        "type": event.type,
        "state_key": event.state_key,
        "content": event.content,
        "sender": event.sender,
    }
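A hedged illustration of the output shape (hypothetical event, not from this diff):

```python
# Given a hypothetical m.room.name state event, strip_event keeps exactly
# the four allowed keys and drops everything else (event_id,
# origin_server_ts, signatures, and so on).
stripped = strip_event(name_event)
# -> {
#     "type": "m.room.name",
#     "state_key": "",
#     "content": {"name": "Kitchen"},
#     "sender": "@alice:example.org",
# }
```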
@@ -1871,6 +1871,52 @@ class FederationClient(FederationBase):

        return filtered_statuses, filtered_failures

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Union[
        Tuple[int, Dict[bytes, List[bytes]], bytes],
        Tuple[int, Dict[bytes, List[bytes]]],
    ]:
        try:
            return await self.transport_layer.federation_download_media(
                destination,
                media_id,
                output_stream=output_stream,
                max_size=max_size,
                max_timeout_ms=max_timeout_ms,
                download_ratelimiter=download_ratelimiter,
                ip_address=ip_address,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fall back to the _matrix/media/v3/download endpoint. Otherwise,
            # consider it a legitimate error and raise.
            if not is_unknown_endpoint(e):
                raise

            logger.debug(
                "Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
                destination,
                media_id,
            )

            return await self.transport_layer.download_media_v3(
                destination,
                media_id,
                output_stream=output_stream,
                max_size=max_size,
                max_timeout_ms=max_timeout_ms,
                download_ratelimiter=download_ratelimiter,
                ip_address=ip_address,
            )

    async def download_media(
        self,
        destination: str,
@@ -21,6 +21,7 @@
#
import datetime
import logging
from collections import OrderedDict
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type

@@ -68,6 +69,10 @@ sent_edus_by_type = Counter(
# If the retry interval is larger than this then we enter "catchup" mode
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000

# Limit how many presence states we add to each presence EDU, to ensure that
# they are bounded in size.
MAX_PRESENCE_STATES_PER_EDU = 50


class PerDestinationQueue:
    """
@@ -144,7 +149,7 @@ class PerDestinationQueue:

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
-        self._pending_presence: Dict[str, UserPresenceState] = {}
+        self._pending_presence: OrderedDict[str, UserPresenceState] = OrderedDict()

        # List of room_id -> receipt_type -> user_id -> receipt_dict,
        #
@@ -333,12 +338,11 @@ class PerDestinationQueue:
                # not caught up yet
                return

        pending_pdus = []
        while True:
            self._new_data_to_send = False

            async with _TransactionQueueManager(self) as (
-                pending_pdus,
+                pending_pdus,  # noqa: F811
                pending_edus,
            ):
                if not pending_pdus and not pending_edus:
@@ -399,7 +403,7 @@ class PerDestinationQueue:
            # through another mechanism, because this is all volatile!
            self._pending_edus = []
            self._pending_edus_keyed = {}
-            self._pending_presence = {}
+            self._pending_presence.clear()
            self._pending_receipt_edus = []

            self._start_catching_up()
@@ -721,22 +725,26 @@ class _TransactionQueueManager:

        # Add presence EDU.
        if self.queue._pending_presence:
            # Only send max 50 presence entries in the EDU, to bound the amount
            # of data we're sending.
            presence_to_add: List[JsonDict] = []
            while (
                self.queue._pending_presence
                and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU
            ):
                _, presence = self.queue._pending_presence.popitem(last=False)
                presence_to_add.append(
                    format_user_presence_state(presence, self.queue._clock.time_msec())
                )

            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type=EduTypes.PRESENCE,
-                    content={
-                        "push": [
-                            format_user_presence_state(
-                                presence, self.queue._clock.time_msec()
-                            )
-                            for presence in self.queue._pending_presence.values()
-                        ]
-                    },
+                    content={"push": presence_to_add},
                )
            )
-            self.queue._pending_presence = {}

        # Add read receipt EDUs.
        pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
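The bounded drain above relies on `OrderedDict.popitem(last=False)` removing the oldest entry first; a hedged stdlib illustration:

```python
from collections import OrderedDict

# FIFO draining: the queue sends the oldest pending presence states first
# and leaves the rest for the next transaction.
pending = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
key, value = pending.popitem(last=False)  # -> ("a", 1), the oldest entry
```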
@@ -824,7 +824,6 @@ class TransportLayerClient:
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/r0/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,
@@ -852,7 +851,6 @@ class TransportLayerClient:
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]]]:
        path = f"/_matrix/media/v3/download/{destination}/{media_id}"

        return await self.client.get_file(
            destination,
            path,
@@ -873,6 +871,29 @@ class TransportLayerClient:
            ip_address=ip_address,
        )

    async def federation_download_media(
        self,
        destination: str,
        media_id: str,
        output_stream: BinaryIO,
        max_size: int,
        max_timeout_ms: int,
        download_ratelimiter: Ratelimiter,
        ip_address: str,
    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
        path = f"/_matrix/federation/v1/media/download/{media_id}"
        return await self.client.federation_get_file(
            destination,
            path,
            output_stream=output_stream,
            max_size=max_size,
            args={
                "timeout_ms": str(max_timeout_ms),
            },
            download_ratelimiter=download_ratelimiter,
            ip_address=ip_address,
        )


def _create_path(federation_prefix: str, path: str, *args: str) -> str:
    """
@@ -32,6 +32,8 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
    FEDERATION_SERVLET_CLASSES,
    FederationAccountStatusServlet,
    FederationMediaDownloadServlet,
    FederationMediaThumbnailServlet,
    FederationUnstableClientKeysClaimServlet,
)
from synapse.http.server import HttpServer, JsonResource
@@ -315,6 +317,13 @@ def register_servlets(
        ):
            continue

        if (
            servletclass == FederationMediaDownloadServlet
            or servletclass == FederationMediaThumbnailServlet
        ):
            if not hs.config.media.can_load_media_repo:
                continue

        servletclass(
            hs=hs,
            authenticator=authenticator,
@@ -360,13 +360,33 @@ class BaseFederationServlet:
                            "request"
                        )
                        return None
                    if (
                        func.__self__.__class__.__name__  # type: ignore
                        == "FederationMediaDownloadServlet"
                        or func.__self__.__class__.__name__  # type: ignore
                        == "FederationMediaThumbnailServlet"
                    ):
                        response = await func(
                            origin, content, request, *args, **kwargs
                        )
                    else:
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    if (
                        func.__self__.__class__.__name__  # type: ignore
                        == "FederationMediaDownloadServlet"
                        or func.__self__.__class__.__name__  # type: ignore
                        == "FederationMediaThumbnailServlet"
                    ):
                        response = await func(
                            origin, content, request, *args, **kwargs
                        )
                    else:
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
-                else:
-                    response = await func(
-                        origin, content, request.args, *args, **kwargs
-                    )
        finally:
            # if we used the origin's context as the parent, add a new span using
            # the servlet span as a parent, so that we have a link
@@ -44,10 +44,15 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
    parse_boolean_from_args,
    parse_integer,
    parse_integer_from_args,
    parse_string,
    parse_string_from_args,
    parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -787,6 +792,95 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
        return 200, {"account_statuses": statuses, "failures": failures}


class FederationMediaDownloadServlet(BaseFederationServerServlet):
    """
    Implementation of the new federation media `/download` endpoint outlined in MSC3916.
    Returns a multipart/mixed response consisting of a JSON object and the requested media
    item. This endpoint only returns local media.
    """

    PATH = "/media/download/(?P<media_id>[^/]*)"
    RATELIMIT = True

    def __init__(
        self,
        hs: "HomeServer",
        ratelimiter: FederationRateLimiter,
        authenticator: Authenticator,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.media_repo = self.hs.get_media_repository()

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        request: SynapseRequest,
        media_id: str,
    ) -> None:
        max_timeout_ms = parse_integer(
            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
        )
        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
        await self.media_repo.get_local_media(
            request, media_id, None, max_timeout_ms, federation=True
        )


class FederationMediaThumbnailServlet(BaseFederationServerServlet):
    """
    Implementation of the new federation media `/thumbnail` endpoint outlined in MSC3916.
    Returns a multipart/mixed response consisting of a JSON object and the requested media
    item. This endpoint only returns local media.
    """

    PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
    RATELIMIT = True

    def __init__(
        self,
        hs: "HomeServer",
        ratelimiter: FederationRateLimiter,
        authenticator: Authenticator,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.media_repo = self.hs.get_media_repository()
        self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
        self.thumbnail_provider = ThumbnailProvider(
            hs, self.media_repo, self.media_repo.media_storage
        )

    async def on_GET(
        self,
        origin: Optional[str],
        content: Literal[None],
        request: SynapseRequest,
        media_id: str,
    ) -> None:
        width = parse_integer(request, "width", required=True)
        height = parse_integer(request, "height", required=True)
        method = parse_string(request, "method", "scale")
        # TODO Parse the Accept header to get a prioritised list of thumbnail types.
        m_type = "image/png"
        max_timeout_ms = parse_integer(
            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
        )
        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)

        if self.dynamic_thumbnails:
            await self.thumbnail_provider.select_or_generate_local_thumbnail(
                request, media_id, width, height, method, m_type, max_timeout_ms, True
            )
        else:
            await self.thumbnail_provider.respond_local_thumbnail(
                request, media_id, width, height, method, m_type, max_timeout_ms, True
            )
        self.media_repo.mark_recently_accessed(None, media_id)


FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationSendServlet,
    FederationEventServlet,
@@ -818,4 +912,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationV1SendKnockServlet,
    FederationMakeKnockServlet,
    FederationAccountStatusServlet,
    FederationMediaDownloadServlet,
    FederationMediaThumbnailServlet,
)
@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
                    ratelimit=False,
                    require_consent=False,
                )

                # Mark the room forgotten too, because they won't be able to do this
                # for us. This may lead to the room being purged eventually.
                await self._room_member_handler.forget(user, room_id)
            except Exception:
                logger.exception(
                    "Failed to part user %r from room %r: ignoring and continuing",
@@ -39,6 +39,7 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.types import (
    DeviceListUpdates,
    JsonDict,
    JsonMapping,
    ScheduledTask,
@@ -214,7 +215,7 @@ class DeviceWorkerHandler:
    @cancellable
    async def get_user_ids_changed(
        self, user_id: str, from_token: StreamToken
-    ) -> JsonDict:
+    ) -> DeviceListUpdates:
        """Get list of users that have had the devices updated, or have newly
        joined a room, that `user_id` may be interested in.
        """
@@ -341,11 +342,19 @@ class DeviceWorkerHandler:
            possibly_joined = set()
            possibly_left = set()

-        result = {"changed": list(possibly_joined), "left": list(possibly_left)}
+        device_list_updates = DeviceListUpdates(
+            changed=possibly_joined,
+            left=possibly_left,
+        )

-        log_kv(result)
+        log_kv(
+            {
+                "changed": device_list_updates.changed,
+                "left": device_list_updates.left,
+            }
+        )

-        return result
+        return device_list_updates

    async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
        if not self.hs.is_mine(UserID.from_string(user_id)):
@@ -34,7 +34,7 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import ReadWriteLock

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
        # clients belonging to a user will receive and try to upload a new session at
        # roughly the same time. Also used to lock out uploads when the key is being
        # changed.
-        self._upload_linearizer = Linearizer("upload_room_keys_lock")
+        self._upload_lock = ReadWriteLock()

    @trace
    async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:

        # we deliberately take the lock to get keys so that changing the version
        # works atomically
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.read(user_id):
            # make sure the backup version exists
            try:
                await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
        """

        # lock for consistency with uploading
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
            # make sure the backup version exists
            try:
                version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
        # TODO: Validate the JSON to make sure it has the right keys.

        # XXX: perhaps we should use a finer grained lock here?
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
            # Check that the version we're trying to upload is the current version
            try:
                version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -355,7 +355,7 @@ class E2eRoomKeysHandler:
        # TODO: Validate the JSON to make sure it has the right keys.

        # lock everyone out until we've switched version
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
            new_version = await self.store.create_e2e_room_keys_version(
                user_id, version_info
            )
@@ -382,7 +382,7 @@ class E2eRoomKeysHandler:
        }
        """

-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.read(user_id):
            try:
                res = await self.store.get_e2e_room_keys_version_info(user_id, version)
            except StoreError as e:
@@ -407,7 +407,7 @@ class E2eRoomKeysHandler:
            NotFoundError: if this backup version doesn't exist
        """

-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
            try:
                await self.store.delete_e2e_room_keys_version(user_id, version)
            except StoreError as e:
@@ -437,7 +437,7 @@ class E2eRoomKeysHandler:
            raise SynapseError(
                400, "Version in body does not match", Codes.INVALID_PARAM
            )
-        async with self._upload_linearizer.queue(user_id):
+        async with self._upload_lock.write(user_id):
            try:
                old_info = await self.store.get_e2e_room_keys_version_info(
                    user_id, version
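The swap above changes the concurrency profile; a hedged sketch of the property it relies on (assuming the `read`/`write` async context managers used in this diff):

```python
from synapse.util.async_helpers import ReadWriteLock

lock = ReadWriteLock()

async def reader(user_id: str) -> None:
    # Multiple readers for the same key may hold the lock concurrently, so
    # parallel get_room_keys calls no longer queue behind each other.
    async with lock.read(user_id):
        ...

async def writer(user_id: str) -> None:
    # Writers are exclusive: they wait for readers to drain and block new
    # ones, which is what version changes and uploads need.
    async with lock.write(user_id):
        ...
```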
@@ -642,6 +642,17 @@ class EventCreationHandler:
        """
        await self.auth_blocking.check_auth_blocking(requester=requester)

        if event_dict["type"] == EventTypes.Message:
            requester_suspended = await self.store.get_user_suspended_status(
                requester.user.to_string()
            )
            if requester_suspended:
                raise SynapseError(
                    403,
                    "Sending messages while account is suspended is not allowed.",
                    Codes.USER_ACCOUNT_SUSPENDED,
                )

        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
            room_version_id = event_dict["content"]["room_version"]
            maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
@@ -1188,6 +1188,8 @@ class RoomCreationHandler:
            )
            events_to_send.append((power_event, power_context))
        else:
            # Please update the docs for `default_power_level_content_override` when
            # updating the `events` dict below
            power_level_content: JsonDict = {
                "users": {creator_id: 100},
                "users_default": 0,

(File diff suppressed because it is too large.)
@@ -1352,7 +1352,7 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False
 
-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -1480,11 +1480,13 @@ class SyncHandler:
             else:
                 # We can get here if the user has ignored the senders of all
                 # the recent events.
-                state_at_timeline_start = await self._state_storage_controller.get_state_at(
-                    room_id,
-                    stream_position=end_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
+                state_at_timeline_start = (
+                    await self._state_storage_controller.get_state_ids_at(
+                        room_id,
+                        stream_position=end_token,
+                        state_filter=state_filter,
+                        await_full_state=await_full_state,
+                    )
                 )
 
             if batch.limited:
@@ -1502,14 +1504,14 @@ class SyncHandler:
             # about them).
             state_filter = StateFilter.all()
 
-        state_at_previous_sync = await self._state_storage_controller.get_state_at(
+        state_at_previous_sync = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=since_token,
             state_filter=state_filter,
             await_full_state=await_full_state,
         )
 
-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
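Note: the repeated `get_state_at` to `get_state_ids_at` renames above switch sync to fetching state maps of event IDs rather than full events at these points, deferring the expensive per-event loads. A toy illustration of the two shapes (editor's sketch, not Synapse code):

    # A state map is keyed by (event_type, state_key).
    state_ids = {
        ("m.room.member", "@alice:example.org"): "$membership_event_id",
        ("m.room.name", ""): "$name_event_id",
    }
    # get_state_ids_at returns the ID map above; get_state_at resolved each ID
    # to the full event, which costs a database fetch per entry.
    full_state = {key: {"event_id": event_id} for key, event_id in state_ids.items()}
    print(full_state[("m.room.name", "")])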
@@ -2268,7 +2270,11 @@ class SyncHandler:
                 user=user,
                 from_key=presence_key,
                 is_guest=sync_config.is_guest,
-                include_offline=include_offline,
+                include_offline=(
+                    True
+                    if self.hs_config.server.presence_include_offline_users_on_sync
+                    else include_offline
+                ),
             )
             assert presence_key
             sync_result_builder.now_token = now_token.copy_and_replace(
@@ -2508,7 +2514,7 @@ class SyncHandler:
                 continue
 
             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self._state_storage_controller.get_state_at(
+                old_state_ids = await self._state_storage_controller.get_state_ids_at(
                     room_id,
                     since_token,
                     state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2539,7 +2545,7 @@ class SyncHandler:
                 else:
                     if not old_state_ids:
                         old_state_ids = (
-                            await self._state_storage_controller.get_state_at(
+                            await self._state_storage_controller.get_state_ids_at(
                                 room_id,
                                 since_token,
                                 state_filter=StateFilter.from_types(
@@ -35,6 +35,8 @@ from typing import (
     Union,
 )
 
 import attr
+import multipart
 import treq
 from canonicaljson import encode_canonical_json
 from netaddr import AddrFormatError, IPAddress, IPSet
@@ -1006,6 +1008,130 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
         self._maybe_fail()
 
 
+@attr.s(auto_attribs=True, slots=True)
+class MultipartResponse:
+    """
+    A small class to hold parsed values of a multipart response.
+    """
+
+    json: bytes = b"{}"
+    length: Optional[int] = None
+    content_type: Optional[bytes] = None
+    disposition: Optional[bytes] = None
+    url: Optional[bytes] = None
+
+
+class _MultipartParserProtocol(protocol.Protocol):
+    """
+    Protocol to read and parse a MSC3916 multipart/mixed response
+    """
+
+    transport: Optional[ITCPTransport] = None
+
+    def __init__(
+        self,
+        stream: ByteWriteable,
+        deferred: defer.Deferred,
+        boundary: str,
+        max_length: Optional[int],
+    ) -> None:
+        self.stream = stream
+        self.deferred = deferred
+        self.boundary = boundary
+        self.max_length = max_length
+        self.parser = None
+        self.multipart_response = MultipartResponse()
+        self.has_redirect = False
+        self.in_json = False
+        self.json_done = False
+        self.file_length = 0
+        self.total_length = 0
+        self.in_disposition = False
+        self.in_content_type = False
+
+    def dataReceived(self, incoming_data: bytes) -> None:
+        if self.deferred.called:
+            return
+
+        # we don't have a parser yet, instantiate it
+        if not self.parser:
+
+            def on_header_field(data: bytes, start: int, end: int) -> None:
+                if data[start:end] == b"Location":
+                    self.has_redirect = True
+                if data[start:end] == b"Content-Disposition":
+                    self.in_disposition = True
+                if data[start:end] == b"Content-Type":
+                    self.in_content_type = True
+
+            def on_header_value(data: bytes, start: int, end: int) -> None:
+                # the first header should be content-type for application/json
+                if not self.in_json and not self.json_done:
+                    assert data[start:end] == b"application/json"
+                    self.in_json = True
+                elif self.has_redirect:
+                    self.multipart_response.url = data[start:end]
+                elif self.in_content_type:
+                    self.multipart_response.content_type = data[start:end]
+                    self.in_content_type = False
+                elif self.in_disposition:
+                    self.multipart_response.disposition = data[start:end]
+                    self.in_disposition = False
+
+            def on_part_data(data: bytes, start: int, end: int) -> None:
+                # we've seen json header but haven't written the json data
+                if self.in_json and not self.json_done:
+                    self.multipart_response.json = data[start:end]
+                    self.json_done = True
+                # we have a redirect header rather than a file, and have already captured it
+                elif self.has_redirect:
+                    return
+                # otherwise we are in the file part
+                else:
+                    logger.info("Writing multipart file data to stream")
+                    try:
+                        self.stream.write(data[start:end])
+                    except Exception as e:
+                        logger.warning(
+                            f"Exception encountered writing file data to stream: {e}"
+                        )
+                        self.deferred.errback()
+                    self.file_length += end - start
+
+            callbacks = {
+                "on_header_field": on_header_field,
+                "on_header_value": on_header_value,
+                "on_part_data": on_part_data,
+            }
+            self.parser = multipart.MultipartParser(self.boundary, callbacks)
+
+        self.total_length += len(incoming_data)
+        if self.max_length is not None and self.total_length >= self.max_length:
+            self.deferred.errback(BodyExceededMaxSize())
+            # Close the connection (forcefully) since all the data will get
+            # discarded anyway.
+            assert self.transport is not None
+            self.transport.abortConnection()
+
+        try:
+            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+        except Exception as e:
+            logger.warning(f"Exception writing to multipart parser: {e}")
+            self.deferred.errback()
+            return
+
+    def connectionLost(self, reason: Failure = connectionDone) -> None:
+        # If the maximum size was already exceeded, there's nothing to do.
+        if self.deferred.called:
+            return
+
+        if reason.check(ResponseDone):
+            self.multipart_response.length = self.file_length
+            self.deferred.callback(self.multipart_response)
+        else:
+            self.deferred.errback(reason)
+
+
 class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""
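Note: a standalone demonstration of the callback-driven parsing used by `_MultipartParserProtocol` above, feeding a small MSC3916-style multipart/mixed body through the same `multipart.MultipartParser` API (assumes the python-multipart package that the hunk imports; the boundary value here is made up):

    import multipart

    boundary = "gc0p4Jq0M2Yt08jU534c0p"
    body = (
        f"--{boundary}\r\n"
        "Content-Type: application/json\r\n\r\n"
        "{}\r\n"
        f"--{boundary}\r\n"
        "Content-Type: text/plain\r\n"
        "Content-Disposition: attachment; filename=hello.txt\r\n\r\n"
        "hello from the file part\r\n"
        f"--{boundary}--\r\n"
    ).encode("ascii")

    parts = []
    callbacks = {
        # start/end index into the buffer, exactly as in the protocol above
        "on_part_data": lambda data, start, end: parts.append(data[start:end]),
    }
    parser = multipart.MultipartParser(boundary, callbacks)
    parser.write(body)
    print(parts)  # typically [b'{}', b'hello from the file part'] (chunking may vary)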
@@ -1091,6 +1217,32 @@ def read_body_with_max_size(
     return d
 
 
+def read_multipart_response(
+    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
+) -> "defer.Deferred[MultipartResponse]":
+    """
+    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
+    the stream passed in and returning a deferred resolving to a MultipartResponse
+
+    Args:
+        response: The HTTP response to read from.
+        stream: The file-object to write to.
+        boundary: the multipart/mixed boundary string
+        max_length: maximum allowable length of the response
+    """
+    d: defer.Deferred[MultipartResponse] = defer.Deferred()
+
+    # If the Content-Length header gives a size larger than the maximum allowed
+    # size, do not bother downloading the body.
+    if max_length is not None and response.length != UNKNOWN_LENGTH:
+        if response.length > max_length:
+            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
+            return d
+
+    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
+    return d
+
+
 def encode_query_args(args: Optional[QueryParams]) -> bytes:
     """
     Encodes a map of query arguments to bytes which can be appended to a URL.
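Note: a sketch of the expected calling pattern for `read_multipart_response` from async Synapse code; the imports exist per the hunks above, but the surrounding Twisted plumbing and error handling are simplified:

    from io import BytesIO

    from synapse.http.client import read_multipart_response
    from synapse.logging.context import make_deferred_yieldable

    async def fetch_multipart(response, boundary: str):
        # `response` is a twisted.web.iweb.IResponse already in hand
        out = BytesIO()
        d = read_multipart_response(response, out, boundary, max_length=10_000_000)
        multipart_response = await make_deferred_yieldable(d)  # a MultipartResponse
        return multipart_response, out.getvalue()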
@@ -75,9 +75,11 @@ from synapse.http.client import (
     BlocklistingAgentWrapper,
     BodyExceededMaxSize,
     ByteWriteable,
+    SimpleHttpClient,
     _make_scheduler,
     encode_query_args,
     read_body_with_max_size,
+    read_multipart_response,
 )
 from synapse.http.connectproxyclient import BearerProxyCredentials
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
@@ -88,7 +90,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag, start_active_span, tags
 from synapse.types import JsonDict
 from synapse.util import json_decoder
-from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
+from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
 from synapse.util.metrics import Measure
 from synapse.util.stringutils import parse_and_validate_server_name
 
@@ -466,6 +468,15 @@ class MatrixFederationHttpClient:
 
         self._sleeper = AwakenableSleeper(self.reactor)
 
+        self._simple_http_client = SimpleHttpClient(
+            hs,
+            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
+            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
+            use_proxy=True,
+        )
+
+        self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6)
+
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""
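Note: `Linearizer("remote_download_linearizer", 6)` caps concurrency per key at six. A toy equivalent with per-key asyncio semaphores, as an editor's sketch of the semantics rather than Synapse's actual Linearizer:

    import asyncio
    from collections import defaultdict

    class KeyedLimiter:
        def __init__(self, max_count: int) -> None:
            self._sems = defaultdict(lambda: asyncio.Semaphore(max_count))

        def queue(self, key: str) -> asyncio.Semaphore:
            # mirrors `async with limiter.queue(ip_address): ...`
            return self._sems[key]

    async def main() -> None:
        limiter = KeyedLimiter(6)
        async with limiter.queue("203.0.113.7"):
            pass  # at most six concurrent downloads per requester IP

    asyncio.run(main())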
@@ -1477,35 +1488,44 @@ class MatrixFederationHttpClient:
         )
 
         headers = dict(response.headers.getAllRawHeaders())
 
         expected_size = response.length
         # if we don't get an expected length then use the max length
 
         if expected_size == UNKNOWN_LENGTH:
             expected_size = max_size
             logger.debug(
                 f"File size unknown, assuming file is max allowable size: {max_size}"
             )
         else:
             if int(expected_size) > max_size:
                 msg = "Requested file is too large > %r bytes" % (max_size,)
                 logger.warning(
                     "{%s} [%s] %s",
                     request.txn_id,
                     request.destination,
                     msg,
                 )
                 raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
 
-        read_body, _ = await download_ratelimiter.can_do_action(
-            requester=None,
-            key=ip_address,
-            n_actions=expected_size,
-        )
-        if not read_body:
-            msg = "Requested file size exceeds ratelimits"
-            logger.warning(
-                "{%s} [%s] %s",
-                request.txn_id,
-                request.destination,
-                msg,
-            )
-            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+            read_body, _ = await download_ratelimiter.can_do_action(
+                requester=None,
+                key=ip_address,
+                n_actions=expected_size,
+            )
+            if not read_body:
+                msg = "Requested file size exceeds ratelimits"
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(
+                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
+                )
 
         try:
-            # add a byte of headroom to max size as function errs at >=
-            d = read_body_with_max_size(response, output_stream, expected_size + 1)
-            d.addTimeout(self.default_timeout_seconds, self.reactor)
-            length = await make_deferred_yieldable(d)
+            async with self.remote_download_linearizer.queue(ip_address):
+                # add a byte of headroom to max size as function errs at >=
+                d = read_body_with_max_size(response, output_stream, expected_size + 1)
+                d.addTimeout(self.default_timeout_seconds, self.reactor)
+                length = await make_deferred_yieldable(d)
         except BodyExceededMaxSize:
             msg = "Requested file is too large > %r bytes" % (expected_size,)
             logger.warning(
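Note: the hunk above moves the pre-read ratelimiter check under the known-size branch and serializes the body read through the per-IP download linearizer. The ratelimiter counts bytes as actions; a toy model of that accounting (assumed semantics, not Synapse's Ratelimiter class):

    class ByteBudget:
        # Simplified stand-in for the download ratelimiter's byte accounting.
        def __init__(self, burst_bytes: int) -> None:
            self.remaining = burst_bytes

        def can_do_action(self, n_actions: int) -> bool:
            if n_actions > self.remaining:
                return False
            self.remaining -= n_actions
            return True

        def record_action(self, n_actions: int) -> None:
            # charge after the fact, e.g. when Content-Length was unknown
            self.remaining = max(0, self.remaining - n_actions)

    budget = ByteBudget(burst_bytes=500 * 1024 * 1024)
    print(budget.can_do_action(10 * 1024 * 1024))  # True: known-size download fits
    budget.record_action(3 * 1024 * 1024)          # unknown size, charged afterwards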
@@ -1551,8 +1571,214 @@ class MatrixFederationHttpClient:
                 request.method,
                 request.uri.decode("ascii"),
             )
 
+        # if we didn't know the length upfront, decrement the actual size from ratelimiter
+        if response.length == UNKNOWN_LENGTH:
+            download_ratelimiter.record_action(
+                requester=None, key=ip_address, n_actions=length
+            )
+
         return length, headers
 
+    async def federation_get_file(
+        self,
+        destination: str,
+        path: str,
+        output_stream: BinaryIO,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+        max_size: int,
+        args: Optional[QueryParams] = None,
+        retry_on_dns_fail: bool = True,
+        ignore_backoff: bool = False,
+    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+        """GETs a file from a given homeserver over the federation /download endpoint
+        Args:
+            destination: The remote server to send the HTTP request to.
+            path: The HTTP path to GET.
+            output_stream: File to write the response body to.
+            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
+                requester IP
+            ip_address: IP address of the requester
+            max_size: maximum allowable size in bytes of the file
+            args: Optional dictionary used to create the query string.
+            ignore_backoff: true to ignore the historical backoff data
+                and try the request anyway.
+
+        Returns:
+            Resolves to an (int, dict, bytes) tuple of
+            the file length, a dict of the response headers, and the file json
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
+            SynapseError: If the requested file exceeds ratelimits or the response from the
+                remote server is not a multipart response
+            AssertionError: if the resolved multipart response's length is None
+        """
+        request = MatrixFederationRequest(
+            method="GET", destination=destination, path=path, query=args
+        )
+
+        # check for a minimum balance of 1MiB in ratelimiter before initiating request
+        send_req, _ = await download_ratelimiter.can_do_action(
+            requester=None, key=ip_address, n_actions=1048576, update=False
+        )
+
+        if not send_req:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
+        response = await self._send_request(
+            request,
+            retry_on_dns_fail=retry_on_dns_fail,
+            ignore_backoff=ignore_backoff,
+        )
+
+        headers = dict(response.headers.getAllRawHeaders())
+        expected_size = response.length
+
+        if expected_size == UNKNOWN_LENGTH:
+            expected_size = max_size
+        else:
+            if int(expected_size) > max_size:
+                msg = "Requested file is too large > %r bytes" % (max_size,)
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
+
+            read_body, _ = await download_ratelimiter.can_do_action(
+                requester=None,
+                key=ip_address,
+                n_actions=expected_size,
+            )
+            if not read_body:
+                msg = "Requested file size exceeds ratelimits"
+                logger.warning(
+                    "{%s} [%s] %s",
+                    request.txn_id,
+                    request.destination,
+                    msg,
+                )
+                raise SynapseError(
+                    HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
+                )
+
+        # this should be a multipart/mixed response with the boundary string in the header
+        try:
+            raw_content_type = headers.get(b"Content-Type")
+            assert raw_content_type is not None
+            content_type = raw_content_type[0].decode("UTF-8")
+            content_type_parts = content_type.split("boundary=")
+            boundary = content_type_parts[1]
+        except Exception:
+            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
+
+        try:
+            async with self.remote_download_linearizer.queue(ip_address):
+                # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
+                deferred = read_multipart_response(
+                    response, output_stream, boundary, expected_size + 1
+                )
+                deferred.addTimeout(self.default_timeout_seconds, self.reactor)
+        except BodyExceededMaxSize:
+            msg = "Requested file is too large > %r bytes" % (expected_size,)
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
+        except defer.TimeoutError as e:
+            logger.warning(
+                "{%s} [%s] Timed out reading response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except ResponseFailed as e:
+            logger.warning(
+                "{%s} [%s] Failed to read response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except Exception as e:
+            logger.warning(
+                "{%s} [%s] Error reading response: %s",
+                request.txn_id,
+                request.destination,
+                e,
+            )
+            raise
+
+        multipart_response = await make_deferred_yieldable(deferred)
+        if not multipart_response.url:
+            assert multipart_response.length is not None
+            length = multipart_response.length
+            headers[b"Content-Type"] = [multipart_response.content_type]
+            headers[b"Content-Disposition"] = [multipart_response.disposition]
+
+        # the response contained a redirect url to download the file from
+        else:
+            str_url = multipart_response.url.decode("utf-8")
+            logger.info(
+                "{%s} [%s] File download redirected, now downloading from: %s",
+                request.txn_id,
+                request.destination,
+                str_url,
+            )
+            length, headers, _, _ = await self._simple_http_client.get_file(
+                str_url, output_stream, expected_size
+            )
+
+        logger.info(
+            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
+            request.txn_id,
+            request.destination,
+            response.code,
+            response.phrase.decode("ascii", errors="replace"),
+            length,
+            request.method,
+            request.uri.decode("ascii"),
+        )
+
+        # if we didn't know the length upfront, decrement the actual size from ratelimiter
+        if response.length == UNKNOWN_LENGTH:
+            download_ratelimiter.record_action(
+                requester=None, key=ip_address, n_actions=length
+            )
+
+        return length, headers, multipart_response.json
+
 
 def _flatten_response_never_received(e: BaseException) -> str:
     if hasattr(e, "reasons"):
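Note: `federation_get_file` above pre-flights the ratelimiter with `update=False`, i.e. it peeks at the remaining budget without spending it, requiring at least 1 MiB (1048576 bytes) of headroom before the request is even sent. A worked check of that arithmetic:

    MIN_HEADROOM = 1024 * 1024
    assert MIN_HEADROOM == 1048576

    def can_start_download(remaining_budget_bytes: int) -> bool:
        # mirrors can_do_action(..., n_actions=1048576, update=False)
        return remaining_budget_bytes >= MIN_HEADROOM

    print(can_start_download(2 * 1024 * 1024))  # True
    print(can_start_download(512 * 1024))       # False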
@@ -62,6 +62,15 @@ HOP_BY_HOP_HEADERS = {
     "Upgrade",
 }
 
+if hasattr(Headers, "_canonicalNameCaps"):
+    # Twisted < 24.7.0rc1
+    _canonicalHeaderName = Headers()._canonicalNameCaps  # type: ignore[attr-defined]
+else:
+    # Twisted >= 24.7.0rc1
+    # But note that `_encodeName` still exists on prior versions,
+    # it just encodes differently
+    _canonicalHeaderName = Headers()._encodeName
+
 
 def parse_connection_header_value(
     connection_header_value: Optional[bytes],
@@ -85,11 +94,10 @@ def parse_connection_header_value(
         The set of header names that should not be copied over from the remote response.
         The keys are capitalized in canonical capitalization.
     """
-    headers = Headers()
     extra_headers_to_remove: Set[str] = set()
     if connection_header_value:
         extra_headers_to_remove = {
-            headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
+            _canonicalHeaderName(connection_option.strip()).decode("ascii")
             for connection_option in connection_header_value.split(b",")
         }
@@ -74,6 +74,7 @@ from synapse.api.errors import (
 from synapse.config.homeserver import HomeServerConfig
 from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
 from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
+from synapse.types import ISynapseReactor
 from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
 from synapse.util.cancellation import is_function_cancellable
@@ -868,7 +869,8 @@ async def _async_write_json_to_request_in_thread(
 
     with start_active_span("encode_json_response"):
         span = active_span()
-        json_str = await defer_to_thread(request.reactor, encode, span)
+        reactor: ISynapseReactor = request.reactor  # type: ignore
+        json_str = await defer_to_thread(reactor, encode, span)
 
     _write_bytes_to_request(request, json_str)
@@ -119,14 +119,15 @@ def parse_integer(
         default: value to use if the parameter is absent, defaults to None.
         required: whether to raise a 400 SynapseError if the parameter is absent,
             defaults to False.
-        negative: whether to allow negative integers, defaults to True.
+        negative: whether to allow negative integers, defaults to False (disallowing
+            negatives).
     Returns:
         An int value or the default.
 
     Raises:
         SynapseError: if the parameter is absent and required, if the
             parameter is present and not an integer, or if the
-            parameter is illegitimate negative.
+            parameter is illegitimately negative.
     """
     args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
     return parse_integer_from_args(args, name, default, required, negative)
@@ -164,7 +165,7 @@ def parse_integer_from_args(
     name: str,
     default: Optional[int] = None,
     required: bool = False,
-    negative: bool = True,
+    negative: bool = False,
 ) -> Optional[int]:
     """Parse an integer parameter from the request string
 
@@ -174,7 +175,8 @@ def parse_integer_from_args(
         default: value to use if the parameter is absent, defaults to None.
         required: whether to raise a 400 SynapseError if the parameter is absent,
             defaults to False.
-        negative: whether to allow negative integers, defaults to True.
+        negative: whether to allow negative integers, defaults to False (disallowing
+            negatives).
 
     Returns:
         An int value or the default.
 
@@ -182,7 +184,7 @@ def parse_integer_from_args(
     Raises:
         SynapseError: if the parameter is absent and required, if the
             parameter is present and not an integer, or if the
-            parameter is illegitimate negative.
+            parameter is illegitimately negative.
     """
     name_bytes = name.encode("ascii")
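Note: the default flip above means negative query parameters are now rejected unless a caller opts in. A toy model of the documented behaviour (editor's sketch; the real function raises a 400 SynapseError rather than ValueError):

    def parse_integer_like(value: str, negative: bool = False) -> int:
        n = int(value)
        if n < 0 and not negative:
            raise ValueError("query parameter must not be negative")
        return n

    print(parse_integer_like("10"))                 # 10
    print(parse_integer_like("-5", negative=True))  # -5
    # parse_integer_like("-5") raises, mirroring the 400 response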
@@ -683,7 +683,7 @@ class SynapseSite(ProxySite):
         self.access_logger = logging.getLogger(logger_name)
         self.server_version_string = server_version_string.encode("ascii")
 
-    def log(self, request: SynapseRequest) -> None:
+    def log(self, request: SynapseRequest) -> None:  # type: ignore[override]
         pass
@@ -25,7 +25,16 @@ import os
 import urllib
 from abc import ABC, abstractmethod
 from types import TracebackType
-from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
+from typing import (
+    TYPE_CHECKING,
+    Awaitable,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Tuple,
+    Type,
+)
 
 import attr
 
@@ -37,8 +46,13 @@ from synapse.api.errors import Codes, cs_error
 from synapse.http.server import finish_request, respond_with_json
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
+from synapse.util import Clock
 from synapse.util.stringutils import is_ascii
 
+if TYPE_CHECKING:
+    from synapse.storage.databases.main.media_repository import LocalMedia
+
+
 logger = logging.getLogger(__name__)
 
 # list all text content types that will have the charset default to UTF-8 when
@@ -207,6 +221,7 @@ def add_file_headers(
         # select private. don't bother setting Expires as all our
         # clients are smart enough to be happy with Cache-Control
         request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
 
     if file_size is not None:
         request.setHeader(b"Content-Length", b"%d" % (file_size,))
@@ -260,6 +275,93 @@ def _can_encode_filename_as_token(x: str) -> bool:
     return True
 
 
+async def respond_with_multipart_responder(
+    clock: Clock,
+    request: SynapseRequest,
+    responder: "Optional[Responder]",
+    media_info: "LocalMedia",
+) -> None:
+    """
+    Responds to requests originating from the federation media `/download` endpoint by
+    streaming a multipart/mixed response
+
+    Args:
+        clock:
+        request: the federation request to respond to
+        responder: the responder which will send the response
+        media_info: metadata about the media item
+    """
+    if not responder:
+        respond_404(request)
+        return
+
+    # If we have a responder we *must* use it as a context manager.
+    with responder:
+        if request._disconnected:
+            logger.warning(
+                "Not sending response to request %s, already disconnected.", request
+            )
+            return
+
+        if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
+            disposition = "inline"
+        else:
+            disposition = "attachment"
+
+        def _quote(x: str) -> str:
+            return urllib.parse.quote(x.encode("utf-8"))
+
+        if media_info.upload_name:
+            if _can_encode_filename_as_token(media_info.upload_name):
+                disposition = "%s; filename=%s" % (
+                    disposition,
+                    media_info.upload_name,
+                )
+            else:
+                disposition = "%s; filename*=utf-8''%s" % (
+                    disposition,
+                    _quote(media_info.upload_name),
+                )
+
+        from synapse.media.media_storage import MultipartFileConsumer
+
+        # note that currently the json_object is just {}, this will change when linked media
+        # is implemented
+        multipart_consumer = MultipartFileConsumer(
+            clock,
+            request,
+            media_info.media_type,
+            {},
+            disposition,
+            media_info.media_length,
+        )
+
+        logger.debug("Responding to media request with responder %s", responder)
+        if media_info.media_length is not None:
+            content_length = multipart_consumer.content_length()
+            assert content_length is not None
+            request.setHeader(b"Content-Length", b"%d" % (content_length,))
+
+        request.setHeader(
+            b"Content-Type",
+            b"multipart/mixed; boundary=%s" % multipart_consumer.boundary,
+        )
+
+        try:
+            await responder.write_to_consumer(multipart_consumer)
+        except Exception as e:
+            # The majority of the time this will be due to the client having gone
+            # away. Unfortunately, Twisted simply throws a generic exception at us
+            # in that case.
+            logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
+            # Unregister the producer, if it has one, so Twisted doesn't complain
+            if request.producer:
+                request.unregisterProducer()
+
+    finish_request(request)
+
+
 async def respond_with_responder(
     request: SynapseRequest,
     responder: "Optional[Responder]",
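Note: a standalone sketch of the Content-Disposition logic in the new responder above; the token check here is a rough stand-in for `_can_encode_filename_as_token`, which the diff leaves unchanged:

    import urllib.parse

    def disposition_for(upload_name: str, inline: bool) -> str:
        disposition = "inline" if inline else "attachment"
        token_safe = all(c.isalnum() or c in "-._" for c in upload_name)
        if token_safe:
            return "%s; filename=%s" % (disposition, upload_name)
        return "%s; filename*=utf-8''%s" % (
            disposition,
            urllib.parse.quote(upload_name.encode("utf-8")),
        )

    print(disposition_for("report.pdf", inline=False))
    print(disposition_for("отчёт.pdf", inline=False))  # percent-encoded RFC 5987 form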
@@ -54,6 +54,7 @@ from synapse.media._base import (
     ThumbnailInfo,
     get_filename_from_headers,
     respond_404,
+    respond_with_multipart_responder,
     respond_with_responder,
 )
 from synapse.media.filepath import MediaFilePaths
@@ -429,6 +430,8 @@ class MediaRepository:
         media_id: str,
         name: Optional[str],
         max_timeout_ms: int,
+        allow_authenticated: bool = True,
+        federation: bool = False,
     ) -> None:
         """Responds to requests for local media, if exists, or returns 404.
 
@@ -440,6 +443,8 @@ class MediaRepository:
                 the filename in the Content-Disposition header of the response.
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
+            allow_authenticated: whether media marked as authenticated may be served to this request
+            federation: whether the local media being fetched is for a federation request
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -448,6 +453,10 @@ class MediaRepository:
         if not media_info:
             return
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         self.mark_recently_accessed(None, media_id)
 
         media_type = media_info.media_type
@@ -460,9 +469,14 @@ class MediaRepository:
         file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
 
         responder = await self.media_storage.fetch_media(file_info)
-        await respond_with_responder(
-            request, responder, media_type, media_length, upload_name
-        )
+        if federation:
+            await respond_with_multipart_responder(
+                self.clock, request, responder, media_info
+            )
+        else:
+            await respond_with_responder(
+                request, responder, media_type, media_length, upload_name
+            )
 
     async def get_remote_media(
         self,
@@ -472,6 +486,8 @@ class MediaRepository:
         name: Optional[str],
         max_timeout_ms: int,
         ip_address: str,
+        use_federation_endpoint: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         """Respond to requests for remote media.
 
@@ -484,6 +500,10 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new
+                federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -514,6 +534,8 @@ class MediaRepository:
             max_timeout_ms,
             self.download_ratelimiter,
             ip_address,
+            use_federation_endpoint,
+            allow_authenticated,
         )
 
         # We deliberately stream the file outside the lock
@@ -530,7 +552,13 @@ class MediaRepository:
             respond_404(request)
 
     async def get_remote_media_info(
-        self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        ip_address: str,
+        use_federation: bool,
+        allow_authenticated: bool,
     ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
@@ -541,6 +569,10 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: IP address of the requester
+            use_federation: if a download is necessary, whether to request the remote file
+                over the federation `/download` endpoint
+            allow_authenticated: whether media marked as authenticated may be served to this
+                request
 
         Returns:
             The media info of the file
@@ -561,6 +593,8 @@ class MediaRepository:
             max_timeout_ms,
             self.download_ratelimiter,
             ip_address,
+            use_federation,
+            allow_authenticated,
         )
 
         # Ensure we actually use the responder so that it releases resources
@@ -577,6 +611,8 @@ class MediaRepository:
         max_timeout_ms: int,
         download_ratelimiter: Ratelimiter,
         ip_address: str,
+        use_federation_endpoint: bool,
+        allow_authenticated: bool,
     ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
@@ -590,12 +626,19 @@ class MediaRepository:
             download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
                 requester IP.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new federation
+                /download endpoint
 
         Returns:
             A tuple of responder and the media info of the file.
         """
         media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            # if it isn't cached then don't fetch it or if it's authenticated then don't serve it
+            if not media_info or media_info.authenticated:
+                raise NotFoundError()
+
         # file_id is the ID we use to track the file locally. If we've already
         # seen the file then reuse the existing ID, otherwise generate a new
         # one.
@@ -621,9 +664,23 @@ class MediaRepository:
         # Failed to find the file anywhere, lets download it.
 
         try:
-            media_info = await self._download_remote_file(
-                server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
-            )
+            if not use_federation_endpoint:
+                media_info = await self._download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+            else:
+                media_info = await self._federation_download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+
         except SynapseError:
             raise
         except Exception as e:
@@ -755,6 +812,11 @@ class MediaRepository:
 
         logger.info("Stored remote media in file %r", fname)
 
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
         return RemoteMedia(
             media_origin=server_name,
             media_id=media_id,
@@ -765,6 +827,136 @@ class MediaRepository:
             filesystem_id=file_id,
             last_access_ts=time_now_ms,
             quarantined_by=None,
+            authenticated=authenticated,
         )
 
+    async def _federation_download_remote_file(
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> RemoteMedia:
+        """Attempt to download the remote file from the given server name.
+        Uses the given file_id as the local id and downloads the file over the federation
+        v1 download endpoint
+
+        Args:
+            server_name: Originating server
+            media_id: The media ID of the content (as defined by the
+                remote server). This is different than the file_id, which is
+                locally generated.
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
+            download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
+                requester IP
+            ip_address: the IP address of the requester
+
+        Returns:
+            The media info of the file.
+        """
+
+        file_id = random_string(24)
+
+        file_info = FileInfo(server_name=server_name, file_id=file_id)
+
+        async with self.media_storage.store_into_file(file_info) as (f, fname):
+            try:
+                res = await self.client.federation_download_media(
+                    server_name,
+                    media_id,
+                    output_stream=f,
+                    max_size=self.max_upload_size,
+                    max_timeout_ms=max_timeout_ms,
+                    download_ratelimiter=download_ratelimiter,
+                    ip_address=ip_address,
+                )
+                # if we had to fall back to the _matrix/media endpoint it will only return
+                # the headers and length, check the length of the tuple before unpacking
+                if len(res) == 3:
+                    length, headers, json = res
+                else:
+                    length, headers = res
+            except RequestSendFailed as e:
+                logger.warning(
+                    "Request failed fetching remote media %s/%s: %r",
+                    server_name,
+                    media_id,
+                    e,
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except HttpResponseException as e:
+                logger.warning(
+                    "HTTP error fetching remote media %s/%s: %s",
+                    server_name,
+                    media_id,
+                    e.response,
+                )
+                if e.code == twisted.web.http.NOT_FOUND:
+                    raise e.to_synapse_error()
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except SynapseError:
+                logger.warning(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise
+            except NotRetryingDestination:
+                logger.warning("Not retrying destination %r", server_name)
+                raise SynapseError(502, "Failed to fetch remote media")
+            except Exception:
+                logger.exception(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            if b"Content-Type" in headers:
+                media_type = headers[b"Content-Type"][0].decode("ascii")
+            else:
+                media_type = "application/octet-stream"
+            upload_name = get_filename_from_headers(headers)
+            time_now_ms = self.clock.time_msec()
+
+            # Multiple remote media download requests can race (when using
+            # multiple media repos), so this may throw a violation constraint
+            # exception. If it does we'll delete the newly downloaded file from
+            # disk (as we're in the ctx manager).
+            #
+            # However: we've already called `finish()` so we may have also
+            # written to the storage providers. This is preferable to the
+            # alternative where we call `finish()` *after* this, where we could
+            # end up having an entry in the DB but fail to write the files to
+            # the storage providers.
+            await self.store.store_cached_remote_media(
+                origin=server_name,
+                media_id=media_id,
+                media_type=media_type,
+                time_now_ms=time_now_ms,
+                upload_name=upload_name,
+                media_length=length,
+                filesystem_id=file_id,
+            )
+
+        logger.debug("Stored remote media in file %r", fname)
+
+        if self.hs.config.media.enable_authenticated_media:
+            authenticated = True
+        else:
+            authenticated = False
+
+        return RemoteMedia(
+            media_origin=server_name,
+            media_id=media_id,
+            media_type=media_type,
+            media_length=length,
+            upload_name=upload_name,
+            created_ts=time_now_ms,
+            filesystem_id=file_id,
+            last_access_ts=time_now_ms,
+            quarantined_by=None,
+            authenticated=authenticated,
+        )
 
     def _get_thumbnail_requirements(
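Note: per the fallback comment in `_federation_download_remote_file` above, the client call can return either a 2-tuple (legacy `_matrix/media` fallback) or a 3-tuple (federation `/download` endpoint, with the multipart JSON part). A sketch of that arity dispatch:

    from typing import Dict, Tuple, Union

    Result = Union[Tuple[int, Dict], Tuple[int, Dict, bytes]]

    def unpack(res: Result) -> Tuple[int, Dict, bytes]:
        if len(res) == 3:
            length, headers, json_bytes = res
        else:
            length, headers = res
            json_bytes = b"{}"  # hypothetical default; the diff leaves `json` unused
        return length, headers, json_bytes

    print(unpack((1024, {}, b"{}")))
    print(unpack((2048, {})))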
@@ -870,7 +1062,12 @@ class MediaRepository:
         t_len = os.path.getsize(output_path)
 
         await self.store.store_local_thumbnail(
-            media_id, t_width, t_height, t_type, t_method, t_len
+            media_id,
+            t_width,
+            t_height,
+            t_type,
+            t_method,
+            t_len,
         )
 
         return output_path
@@ -19,9 +19,12 @@
 #
 #
 import contextlib
+import json
 import logging
 import os
 import shutil
+from contextlib import closing
+from io import BytesIO
 from types import TracebackType
 from typing import (
     IO,
@@ -30,24 +33,35 @@ from typing import (
     AsyncIterator,
     BinaryIO,
     Callable,
     List,
     Optional,
     Sequence,
     Tuple,
     Type,
     Union,
+    cast,
 )
+from uuid import uuid4
 
 import attr
+from zope.interface import implementer
 
+from twisted.internet import interfaces
 from twisted.internet.defer import Deferred
 from twisted.internet.interfaces import IConsumer
 from twisted.protocols.basic import FileSender
 
 from synapse.api.errors import NotFoundError
-from synapse.logging.context import defer_to_thread, make_deferred_yieldable
+from synapse.logging.context import (
+    defer_to_thread,
+    make_deferred_yieldable,
+    run_in_background,
+)
 from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
 from synapse.util import Clock
 from synapse.util.file_consumer import BackgroundFileConsumer
 
+from ..types import JsonDict
 from ._base import FileInfo, Responder
 from .filepath import MediaFilePaths
 
@@ -57,6 +71,8 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+CRLF = b"\r\n"
+
 
 class MediaStorage:
     """Responsible for storing/fetching files from local sources.
@@ -174,7 +190,7 @@ class MediaStorage:
         and configured storage providers.
 
         Args:
-            file_info
+            file_info: Metadata about the media file
 
         Returns:
             Returns a Responder if the file was found, otherwise None.
@@ -316,7 +332,7 @@ class FileResponder(Responder):
     """Wraps an open file that can be sent to a request.
 
     Args:
-        open_file: A file like object to be streamed ot the client,
+        open_file: A file like object to be streamed to the client,
            is closed when finished streaming.
     """
 
@@ -370,3 +386,245 @@ class ReadableFileWrapper:
 
         # We yield to the reactor by sleeping for 0 seconds.
         await self.clock.sleep(0)
+@implementer(interfaces.IConsumer)
+@implementer(interfaces.IPushProducer)
+class MultipartFileConsumer:
+    """Wraps a given consumer so that any data that gets written to it gets
+    converted to a multipart format.
+    """
+
+    def __init__(
+        self,
+        clock: Clock,
+        wrapped_consumer: interfaces.IConsumer,
+        file_content_type: str,
+        json_object: JsonDict,
+        disposition: str,
+        content_length: Optional[int],
+    ) -> None:
+        self.clock = clock
+        self.wrapped_consumer = wrapped_consumer
+        self.json_field = json_object
+        self.json_field_written = False
+        self.file_headers_written = False
+        self.file_content_type = file_content_type
+        self.boundary = uuid4().hex.encode("ascii")
+
+        # The producer that registered with us, and if it's a push or pull
+        # producer.
+        self.producer: Optional["interfaces.IProducer"] = None
+        self.streaming: Optional[bool] = None
+
+        # Whether the wrapped consumer has asked us to pause.
+        self.paused = False
+
+        self.length = content_length
+        self.disposition = disposition
+
+    ### IConsumer APIs ###
+
+    def registerProducer(
+        self, producer: "interfaces.IProducer", streaming: bool
+    ) -> None:
+        """
+        Register to receive data from a producer.
+
+        This sets self to be a consumer for a producer. When this object runs
+        out of data (as when a send(2) call on a socket succeeds in moving the
+        last data from a userspace buffer into a kernelspace buffer), it will
+        ask the producer to resumeProducing().
+
+        For L{IPullProducer} providers, C{resumeProducing} will be called once
+        each time data is required.
+
+        For L{IPushProducer} providers, C{pauseProducing} will be called
+        whenever the write buffer fills up and C{resumeProducing} will only be
+        called when it empties. The consumer will only call C{resumeProducing}
+        to balance a previous C{pauseProducing} call; the producer is assumed
+        to start in an un-paused state.
+
+        @param streaming: C{True} if C{producer} provides L{IPushProducer},
+            C{False} if C{producer} provides L{IPullProducer}.
+
+        @raise RuntimeError: If a producer is already registered.
+        """
+        self.producer = producer
+        self.streaming = streaming
+
+        self.wrapped_consumer.registerProducer(self, True)
+
+        # kick off producing if `self.producer` is not a streaming producer
+        if not streaming:
+            self.resumeProducing()
+
+    def unregisterProducer(self) -> None:
+        """
+        Stop consuming data from a producer, without disconnecting.
+        """
+        self.wrapped_consumer.write(CRLF + b"--" + self.boundary + b"--" + CRLF)
+        self.wrapped_consumer.unregisterProducer()
+        self.paused = True
+
+    def write(self, data: bytes) -> None:
+        """
+        The producer will write data by calling this method.
+
+        The implementation must be non-blocking and perform whatever
+        buffering is necessary. If the producer has provided enough data
+        for now and it is a L{IPushProducer}, the consumer may call its
+        C{pauseProducing} method.
+        """
+        if not self.json_field_written:
+            self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+            content_type = Header(b"Content-Type", b"application/json")
+            self.wrapped_consumer.write(bytes(content_type) + CRLF)
+
+            json_field = json.dumps(self.json_field)
+            json_bytes = json_field.encode("utf-8")
+            self.wrapped_consumer.write(CRLF + json_bytes)
+            self.wrapped_consumer.write(CRLF + b"--" + self.boundary + CRLF)
+
+            self.json_field_written = True
+
+        # if we haven't written the content type yet, do so
+        if not self.file_headers_written:
+            type = self.file_content_type.encode("utf-8")
+            content_type = Header(b"Content-Type", type)
+            self.wrapped_consumer.write(bytes(content_type) + CRLF)
+            disp_header = Header(b"Content-Disposition", self.disposition)
+            self.wrapped_consumer.write(bytes(disp_header) + CRLF + CRLF)
+            self.file_headers_written = True
+
+        self.wrapped_consumer.write(data)
+
+    ### IPushProducer APIs ###
+
+    def stopProducing(self) -> None:
+        """
+        Stop producing data.
+
+        This tells a producer that its consumer has died, so it must stop
+        producing data for good.
+        """
+        assert self.producer is not None
+        self.paused = True
+        self.producer.stopProducing()
+
+    def pauseProducing(self) -> None:
+        """
+        Pause producing data.
+
+        Tells a producer that it has produced too much data to process for
+        the time being, and to stop until C{resumeProducing()} is called.
+        """
+        assert self.producer is not None
+        self.paused = True
+
+        if self.streaming:
+            cast("interfaces.IPushProducer", self.producer).pauseProducing()
+        else:
+            self.paused = True
+
+    def resumeProducing(self) -> None:
+        """
+        Resume producing data.
+
+        This tells a producer to re-add itself to the main loop and produce
+        more data for its consumer.
+        """
+        assert self.producer is not None
+
+        if self.streaming:
+            cast("interfaces.IPushProducer", self.producer).resumeProducing()
+        else:
+            # If the producer is not a streaming producer we need to start
+            # repeatedly calling `resumeProducing` in a loop.
+            run_in_background(self._resumeProducingRepeatedly)
+
+    def content_length(self) -> Optional[int]:
+        """
+        Calculate the content length of the multipart response
+        in bytes.
+        """
+        if not self.length:
+            return None
+        # calculate length of json field and content-type, disposition headers
+        json_field = json.dumps(self.json_field)
+        json_bytes = json_field.encode("utf-8")
+        json_length = len(json_bytes)
+
+        type = self.file_content_type.encode("utf-8")
+        content_type = Header(b"Content-Type", type)
+        type_length = len(bytes(content_type))
+
+        disp = self.disposition.encode("utf-8")
+        disp_header = Header(b"Content-Disposition", disp)
+        disp_length = len(bytes(disp_header))
+
+        # 156 is the length of the elements that aren't variable, ie
+        # CRLFs and boundary strings, etc
+        self.length += json_length + type_length + disp_length + 156
+
+        return self.length
+
+    ### Internal APIs. ###
+
+    async def _resumeProducingRepeatedly(self) -> None:
+        assert self.producer is not None
+        assert not self.streaming
+        producer = cast("interfaces.IPullProducer", self.producer)
+
+        self.paused = False
+        while not self.paused:
+            producer.resumeProducing()
+            await self.clock.sleep(0)
+
+
+class Header:
+    """
+    `Header` This class is a tiny wrapper that produces
+    request headers. We can't use standard python header
+    class because it encodes unicode fields using =? bla bla ?=
+    encoding, which is correct, but no one in HTTP world expects
+    that, everyone wants utf-8 raw bytes. (stolen from treq.multipart)
+
+    """
+
+    def __init__(
+        self,
+        name: bytes,
+        value: Any,
+        params: Optional[List[Tuple[Any, Any]]] = None,
+    ):
+        self.name = name
+        self.value = value
+        self.params = params or []
+
+    def add_param(self, name: Any, value: Any) -> None:
+        self.params.append((name, value))
+
+    def __bytes__(self) -> bytes:
+        with closing(BytesIO()) as h:
+            h.write(self.name + b": " + escape(self.value).encode("us-ascii"))
+            if self.params:
+                for name, val in self.params:
+                    h.write(b"; ")
+                    h.write(escape(name).encode("us-ascii"))
+                    h.write(b"=")
+                    h.write(b'"' + escape(val).encode("utf-8") + b'"')
+            h.seek(0)
+            return h.read()
+
+
+def escape(value: Union[str, bytes]) -> str:
+    """
+    This function prevents header values from corrupting the request,
+    a newline in the file name parameter makes form-data request unreadable
+    for a majority of parsers. (stolen from treq.multipart)
+    """
+    if isinstance(value, bytes):
+        value = value.decode("utf-8")
+    return value.replace("\r", "").replace("\n", "").replace('"', '\\"')
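Note: the consumer above frames the stream as a leading CRLF-plus-boundary line, a JSON part, then the file part, and `unregisterProducer` appends the closing boundary. A standalone sketch of the resulting wire format, re-implemented for illustration with a fixed boundary:

    from io import BytesIO

    CRLF = b"\r\n"
    boundary = b"abc123"

    buf = BytesIO()
    buf.write(CRLF + b"--" + boundary + CRLF)
    buf.write(b"Content-Type: application/json" + CRLF)
    buf.write(CRLF + b"{}")
    buf.write(CRLF + b"--" + boundary + CRLF)
    buf.write(b"Content-Type: text/plain" + CRLF)
    buf.write(b"Content-Disposition: attachment; filename=hello.txt" + CRLF + CRLF)
    buf.write(b"file bytes go here")
    buf.write(CRLF + b"--" + boundary + b"--" + CRLF)
    print(buf.getvalue().decode("ascii"))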
@@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type
 
 from PIL import Image
 
-from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error
 from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
 from synapse.http.server import respond_with_json
 from synapse.http.site import SynapseRequest
@@ -36,9 +36,11 @@ from synapse.media._base import (
     ThumbnailInfo,
     respond_404,
     respond_with_file,
+    respond_with_multipart_responder,
     respond_with_responder,
 )
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import FileResponder, MediaStorage
+from synapse.storage.databases.main.media_repository import LocalMedia
 
 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -271,6 +273,8 @@ class ThumbnailProvider:
         method: str,
         m_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -278,6 +282,12 @@ class ThumbnailProvider:
         if not media_info:
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
         await self._select_and_respond_with_thumbnail(
             request,
@@ -290,6 +300,8 @@ class ThumbnailProvider:
             media_id,
             url_cache=bool(media_info.url_cache),
             server_name=None,
+            for_federation=for_federation,
+            media_info=media_info,
         )
 
     async def select_or_generate_local_thumbnail(
@@ -301,14 +313,21 @@ class ThumbnailProvider:
         desired_method: str,
         desired_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
         )
 
         if not media_info:
             return
 
+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
         for info in thumbnail_infos:
             t_w = info.width == desired_width
@@ -326,10 +345,16 @@ class ThumbnailProvider:
 
             responder = await self.media_storage.fetch_media(file_info)
             if responder:
-                await respond_with_responder(
-                    request, responder, info.type, info.length
-                )
-                return
+                if for_federation:
+                    await respond_with_multipart_responder(
+                        self.hs.get_clock(), request, responder, media_info
+                    )
+                    return
+                else:
+                    await respond_with_responder(
+                        request, responder, info.type, info.length
+                    )
+                    return
 
         logger.debug("We don't have a thumbnail of that size. Generating")
 
@@ -344,7 +369,15 @@ class ThumbnailProvider:
         )
 
         if file_path:
-            await respond_with_file(request, desired_type, file_path)
+            if for_federation:
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(),
+                    request,
+                    FileResponder(open(file_path, "rb")),
+                    media_info,
+                )
+            else:
+                await respond_with_file(request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             raise SynapseError(400, "Failed to generate thumbnail.")
@@ -360,14 +393,28 @@ class ThumbnailProvider:
|
||||
desired_type: str,
|
||||
max_timeout_ms: int,
|
||||
ip_address: str,
|
||||
use_federation: bool,
|
||||
allow_authenticated: bool = True,
|
||||
) -> None:
|
||||
media_info = await self.media_repo.get_remote_media_info(
|
||||
server_name, media_id, max_timeout_ms, ip_address
|
||||
server_name,
|
||||
media_id,
|
||||
max_timeout_ms,
|
||||
ip_address,
|
||||
use_federation,
|
||||
allow_authenticated,
|
||||
)
|
||||
if not media_info:
|
||||
respond_404(request)
|
||||
return
|
||||
|
||||
# if the media the thumbnail is generated from is authenticated, don't serve the
|
||||
# thumbnail over an unauthenticated endpoint
|
||||
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
|
||||
if media_info.authenticated:
|
||||
respond_404(request)
|
||||
return
|
||||
|
||||
thumbnail_infos = await self.store.get_remote_media_thumbnails(
|
||||
server_name, media_id
|
||||
)
|
||||
@@ -424,16 +471,29 @@ class ThumbnailProvider:
         m_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
+        allow_authenticated: bool = True,
     ) -> None:
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name,
+            media_id,
+            max_timeout_ms,
+            ip_address,
+            use_federation,
+            allow_authenticated,
         )
         if not media_info:
             return

+        # if the media the thumbnail is generated from is authenticated, don't serve the
+        # thumbnail over an unauthenticated endpoint
+        if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
+            if media_info.authenticated:
+                raise NotFoundError()
+
         thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )

@@ -448,6 +508,7 @@ class ThumbnailProvider:
             media_info.filesystem_id,
             url_cache=False,
             server_name=server_name,
+            for_federation=False,
         )

     async def _select_and_respond_with_thumbnail(

@@ -461,6 +522,8 @@ class ThumbnailProvider:
         media_id: str,
         file_id: str,
         url_cache: bool,
+        for_federation: bool,
+        media_info: Optional[LocalMedia] = None,
         server_name: Optional[str] = None,
     ) -> None:
         """

@@ -476,6 +539,8 @@ class ThumbnailProvider:
             file_id: The ID of the media that a thumbnail is being requested for.
             url_cache: True if this is from a URL cache.
             server_name: The server name, if this is a remote thumbnail.
+            for_federation: whether the request is from the federation /thumbnail request
+            media_info: metadata about the media being requested.
         """
         logger.debug(
             "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",

@@ -511,13 +576,20 @@ class ThumbnailProvider:

             responder = await self.media_storage.fetch_media(file_info)
             if responder:
-                await respond_with_responder(
-                    request,
-                    responder,
-                    file_info.thumbnail.type,
-                    file_info.thumbnail.length,
-                )
-                return
+                if for_federation:
+                    assert media_info is not None
+                    await respond_with_multipart_responder(
+                        self.hs.get_clock(), request, responder, media_info
+                    )
+                    return
+                else:
+                    await respond_with_responder(
+                        request,
+                        responder,
+                        file_info.thumbnail.type,
+                        file_info.thumbnail.length,
+                    )
+                    return

         # If we can't find the thumbnail we regenerate it. This can happen
         # if e.g. we've deleted the thumbnails but still have the original

@@ -558,12 +630,18 @@ class ThumbnailProvider:
             )

             responder = await self.media_storage.fetch_media(file_info)
-            await respond_with_responder(
-                request,
-                responder,
-                file_info.thumbnail.type,
-                file_info.thumbnail.length,
-            )
+            if for_federation:
+                assert media_info is not None
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(), request, responder, media_info
+                )
+            else:
+                await respond_with_responder(
+                    request,
+                    responder,
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                )
         else:
             # This might be because:
             # 1. We can't create thumbnails for the given media (corrupted or

@@ -764,8 +764,16 @@ class Notifier:

     async def wait_for_stream_token(self, stream_token: StreamToken) -> bool:
         """Wait for this worker to catch up with the given stream token."""
+        current_token = self.event_sources.get_current_token()
+        if stream_token.is_before_or_eq(current_token):
+            return True
+
+        # Work around a bug where older Synapse versions gave out tokens "from
+        # the future", i.e. that are ahead of the tokens persisted in the DB.
+        stream_token = await self.event_sources.bound_future_token(stream_token)
+
         start = self.clock.time_msec()
+        logged = False
         while True:
             current_token = self.event_sources.get_current_token()
             if stream_token.is_before_or_eq(current_token):

@@ -776,11 +784,13 @@ class Notifier:
             if now - start > 10_000:
                 return False

-            logger.info(
-                "Waiting for current token to reach %s; currently at %s",
-                stream_token,
-                current_token,
-            )
+            if not logged:
+                logger.info(
+                    "Waiting for current token to reach %s; currently at %s",
+                    stream_token,
+                    current_token,
+                )
+                logged = True

             # TODO: be better
             await self.clock.sleep(0.5)

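Two behavioural changes land in `wait_for_stream_token`: tokens "from the future" are clamped via `bound_future_token` before the wait begins, and the "Waiting for current token" line is now logged once per call instead of on every half-second iteration. A generic, stand-alone version of the resulting loop, with integer tokens and `print` standing in for Synapse's `StreamToken` and logger:

    import asyncio
    import time
    from typing import Callable


    async def wait_for_token(target: int, get_current: Callable[[], int]) -> bool:
        start = time.monotonic()
        logged = False
        while True:
            if target <= get_current():
                return True  # caught up
            if time.monotonic() - start > 10.0:
                return False  # give up after ~10s, like the 10_000ms cap above
            if not logged:
                print(f"waiting for current token to reach {target}")
                logged = True  # log the stall once, not on every iteration
            await asyncio.sleep(0.5)


    # e.g. asyncio.run(wait_for_token(5, lambda: 7)) returns True immediately
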
@@ -28,7 +28,7 @@ import jinja2
 from markupsafe import Markup
 from prometheus_client import Counter

-from synapse.api.constants import EventTypes, Membership, RoomTypes
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
 from synapse.api.errors import StoreError
 from synapse.config.emailconfig import EmailSubjectConfig
 from synapse.events import EventBase

@@ -716,7 +716,8 @@ class Mailer:
         )
         if (
             create_event
-            and create_event.content.get("room_type") == RoomTypes.SPACE
+            and create_event.content.get(EventContentFields.ROOM_TYPE)
+            == RoomTypes.SPACE
         ):
             return self.email_subjects.invite_from_person_to_space % {
                 "person": inviter_name,

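The mailer hunk looks cosmetic but is most likely a behaviour fix: the room type in an `m.room.create` event's content lives under the key `"type"` (which is what `EventContentFields.ROOM_TYPE` resolves to), so the old literal `"room_type"` could never match and the space-specific invite subject was never chosen. A quick check of the corrected comparison, with the constant values inlined as assumptions:

    # Assumed values, matching synapse.api.constants at time of writing.
    ROOM_TYPE = "type"   # EventContentFields.ROOM_TYPE
    SPACE = "m.space"    # RoomTypes.SPACE

    create_content = {"type": "m.space", "creator": "@alice:example.org"}

    assert create_content.get("room_type") is None   # old check: never matched
    assert create_content.get(ROOM_TYPE) == SPACE    # new check: matches spaces
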
@@ -114,13 +114,19 @@ class ReplicationDataHandler:
         """
         all_room_ids: Set[str] = set()
         if stream_name == DeviceListsStream.NAME:
-            if any(row.entity.startswith("@") and not row.is_signature for row in rows):
+            if any(not row.is_signature and not row.hosts_calculated for row in rows):
                 prev_token = self.store.get_device_stream_token()
                 all_room_ids = await self.store.get_all_device_list_changes(
                     prev_token, token
                 )
                 self.store.device_lists_in_rooms_have_changed(all_room_ids, token)

+            # If we're sending federation we need to update the device lists
+            # outbound pokes stream change cache with updated hosts.
+            if self.send_handler and any(row.hosts_calculated for row in rows):
+                hosts = await self.store.get_destinations_for_device(token)
+                self.store.device_lists_outbound_pokes_have_changed(hosts, token)
+
         self.store.process_replication_rows(stream_name, instance_name, token, rows)
         # NOTE: this must be called after process_replication_rows to ensure any
         # cache invalidations are first handled before any stream ID advances.

@@ -433,12 +439,11 @@ class FederationSenderHandler:
             # The entities are either user IDs (starting with '@') whose devices
             # have changed, or remote servers that we need to tell about
             # changes.
-            hosts = {
-                row.entity
-                for row in rows
-                if not row.entity.startswith("@") and not row.is_signature
-            }
-            await self.federation_sender.send_device_messages(hosts, immediate=False)
+            if any(row.hosts_calculated for row in rows):
+                hosts = await self.store.get_destinations_for_device(token)
+                await self.federation_sender.send_device_messages(
+                    hosts, immediate=False
+                )

         elif stream_name == ToDeviceStream.NAME:
             # The to_device stream includes stuff to be pushed to both local

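With `entity` split into `user_id` plus an explicit `hosts_calculated` flag (see the stream-row change below), both replication handlers can classify a batch of device-list rows without sniffing the `@` prefix. A self-contained sketch of the two predicates used above, with a stand-in row type:

    from dataclasses import dataclass
    from typing import List


    @dataclass
    class Row:  # stand-in for DeviceListsStream.DeviceListsStreamRow
        user_id: str
        is_signature: bool
        hosts_calculated: bool


    def needs_room_recalculation(rows: List[Row]) -> bool:
        # ReplicationDataHandler: plain device-list changes for users.
        return any(not r.is_signature and not r.hosts_calculated for r in rows)


    def needs_federation_send(rows: List[Row]) -> bool:
        # FederationSenderHandler: rows whose destination hosts were precomputed.
        return any(r.hosts_calculated for r in rows)


    rows = [
        Row("@alice:example.org", is_signature=False, hosts_calculated=False),
        Row("@alice:example.org", is_signature=False, hosts_calculated=True),
    ]
    assert needs_room_recalculation(rows)
    assert needs_federation_send(rows)
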
@@ -549,10 +549,14 @@ class DeviceListsStream(_StreamFromIdGen):

     @attr.s(slots=True, frozen=True, auto_attribs=True)
     class DeviceListsStreamRow:
-        entity: str
+        user_id: str
         # Indicates that a user has signed their own device with their user-signing key
         is_signature: bool

+        # Indicates if this is a notification that we've calculated the hosts we
+        # need to send the update to.
+        hosts_calculated: bool
+
     NAME = "device_lists"
     ROW_TYPE = DeviceListsStreamRow

@@ -594,13 +598,13 @@ class DeviceListsStream(_StreamFromIdGen):
         upper_limit_token = min(upper_limit_token, signatures_to_token)

         device_updates = [
-            (stream_id, (entity, False))
-            for stream_id, (entity,) in device_updates
+            (stream_id, (entity, False, hosts))
+            for stream_id, (entity, hosts) in device_updates
             if stream_id <= upper_limit_token
         ]

         signatures_updates = [
-            (stream_id, (entity, True))
+            (stream_id, (entity, True, False))
             for stream_id, (entity,) in signatures_updates
             if stream_id <= upper_limit_token
         ]

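The `update_function` reshaping above widens every row tuple to three fields, `(user_id, is_signature, hosts_calculated)`, padding signature rows with `False` since hosts are never precomputed for them. Run stand-alone with made-up data, the comprehensions produce rows like this:

    device_updates = [(1, ("@alice:example.org", True)), (2, ("@bob:example.org", False))]
    signature_updates = [(3, ("@alice:example.org",))]
    upper_limit_token = 3

    device_rows = [
        (stream_id, (entity, False, hosts))
        for stream_id, (entity, hosts) in device_updates
        if stream_id <= upper_limit_token
    ]
    signature_rows = [
        (stream_id, (entity, True, False))
        for stream_id, (entity,) in signature_updates
        if stream_id <= upper_limit_token
    ]

    # [(1, ('@alice:example.org', False, True)), (2, ('@bob:example.org', False, False)),
    #  (3, ('@alice:example.org', True, False))]
    print(device_rows + signature_rows)
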
@@ -145,6 +145,10 @@ class ClientRestResource(JsonResource):
         password_policy.register_servlets(hs, client_resource)
         knock.register_servlets(hs, client_resource)
         appservice_ping.register_servlets(hs, client_resource)
+        if hs.config.media.can_load_media_repo:
+            from synapse.rest.client import media
+
+            media.register_servlets(hs, client_resource)

         # moving to /_synapse/admin
         if is_main_process:

@@ -101,6 +101,7 @@ from synapse.rest.admin.users import (
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     ShadowBanRestServlet,
+    SuspendAccountRestServlet,
     UserAdminServlet,
     UserByExternalId,
     UserByThreePid,

@@ -327,6 +328,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     BackgroundUpdateRestServlet(hs).register(http_server)
     BackgroundUpdateStartJobRestServlet(hs).register(http_server)
     ExperimentalFeaturesRestServlet(hs).register(http_server)
+    if hs.config.experimental.msc3823_account_suspension:
+        SuspendAccountRestServlet(hs).register(http_server)


 def register_servlets_for_client_rest_resource(

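Both REST hunks follow the same pattern: a servlet module is imported, but its endpoints are only mounted when the corresponding config flag is set, so a disabled feature's routes simply 404. A toy sketch of that gating; the `HttpServer` class and URL below are illustrative stand-ins, not Synapse's real routing API:

    from typing import Callable, Dict


    class HttpServer:  # toy router, not Synapse's HttpServer
        def __init__(self) -> None:
            self.routes: Dict[str, Callable[[], str]] = {}

        def register(self, path: str, handler: Callable[[], str]) -> None:
            self.routes[path] = handler


    def register_servlets(server: HttpServer, msc3823_account_suspension: bool) -> None:
        # Only mount the suspension endpoint when the experimental flag is on.
        if msc3823_account_suspension:
            server.register("/_synapse/admin/v1/suspend", lambda: "ok")


    srv = HttpServer()
    register_servlets(srv, msc3823_account_suspension=False)
    assert "/_synapse/admin/v1/suspend" not in srv.routes  # feature off -> no route
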
Some files were not shown because too many files have changed in this diff.