Compare commits


35 Commits

Author SHA1 Message Date
Erik Johnston
1385cea78e Newsfile 2024-07-22 14:25:28 +01:00
Erik Johnston
262f3f3702 Speed up SS room sorting
We do this by bulk fetching the latest stream ordering.
2024-07-22 14:25:28 +01:00
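
A minimal sketch of the bulk-fetch approach described above; `bulk_get_max_stream_ordering` is a hypothetical helper name, not the actual Synapse method:

```python
# Illustrative sketch only: sort sliding-sync rooms by their latest stream
# ordering using one batched lookup instead of one query per room.
async def sort_rooms_by_recency(store, room_ids: list) -> list:
    # One round-trip for all rooms rather than N.
    orderings = await store.bulk_get_max_stream_ordering(room_ids)
    # Most recently active rooms first.
    return sorted(room_ids, key=lambda r: orderings.get(r, 0), reverse=True)
```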
Shay
dc8ddc6472 Prepare for authenticated media freeze (#17433)
As part of the rollout of
[MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md)
this PR adds support for designating authenticated media and ensuring
that authenticated media is not served over unauthenticated endpoints.
2024-07-22 10:33:17 +01:00
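
The enforcement boils down to returning a 404 when authenticated media is requested via a legacy endpoint. A minimal sketch of that check (the real change is spread across Synapse's media repository; see the diff further down):

```python
from synapse.api.errors import NotFoundError


# Sketch of the gating rule: on legacy (unauthenticated) endpoints, media
# marked as authenticated is refused with a 404.
def check_media_visibility(
    media_info, allow_authenticated: bool, enable_authenticated_media: bool
) -> None:
    if (
        enable_authenticated_media
        and not allow_authenticated
        and media_info.authenticated
    ):
        raise NotFoundError()
```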
Erik Johnston
d3f9afd8d9 Add a cache on get_rooms_for_local_user_where_membership_is (#17460)
As it gets used in sliding sync.

We basically invalidate it in all the same places as
`get_rooms_for_user`. Most of the changes are due to needing the
arguments you pass in to be hashable (which lists aren't).
2024-07-19 16:19:15 +01:00
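
The hashability constraint is the same one `functools.lru_cache` imposes; an illustrative sketch (not the actual Synapse `@cached` decorator usage):

```python
import functools


# Like Synapse's `@cached`, lru_cache keys the cache on the call arguments,
# so they must be hashable: a tuple works where a list raises TypeError.
@functools.lru_cache(maxsize=1000)
def rooms_where_membership_is(user_id: str, memberships: tuple) -> tuple:
    # expensive database lookup elided
    return ()


rooms_where_membership_is("@alice:example.com", ("join", "invite"))    # OK
# rooms_where_membership_is("@alice:example.com", ["join", "invite"])  # TypeError: unhashable type: 'list'
```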
Erik Johnston
43c865f7c9 Generate room sync data concurrently (#17458)
This is also what we do for standard `/sync`.
2024-07-19 12:09:39 +01:00
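
The pattern is a fan-out per room; a sketch using asyncio (Synapse itself uses its own Twisted-based helpers such as `gather_results`):

```python
import asyncio


# Sketch: generate each room's sync payload concurrently instead of serially.
async def generate_rooms(room_ids, generate_room_sync):
    results = await asyncio.gather(*(generate_room_sync(r) for r in room_ids))
    return dict(zip(room_ids, results))
```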
dependabot[bot]
71d83477cb Bump sentry-sdk from 2.6.0 to 2.8.0 (#17456) 2024-07-19 11:02:38 +01:00
Ben Banfield-Zanin
6a01af59e1 Improve default_power_level_content_override documentation (#17451)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
2024-07-18 13:32:32 +01:00
Erik Johnston
f583f1dce4 Revert "Bump setuptools from 67.6.0 to 70.0.0" (#17455)
Reverts element-hq/synapse#17448

We hit a bug when deploying with synctl:

```
Traceback (most recent call last):
  File "/home/synapse/env-python311/bin/synctl", line 33, in <module>
    sys.exit(load_entry_point('matrix-synapse', 'console_scripts', 'synctl')())
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/bin/synctl", line 25, in importlib_load_entry_point
    return next(matches).load()
           ^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.11/importlib/metadata/__init__.py", line 202, in load
    module = import_module(match.group('module'))
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.11/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 940, in exec_module
  File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
  File "/home/synapse/src/synapse/_scripts/synctl.py", line 37, in <module>
    from synapse.config import find_config_files
  File "/home/synapse/src/synapse/config/__init__.py", line 22, in <module>
    from ._base import ConfigError, find_config_files
  File "/home/synapse/src/synapse/config/_base.py", line 49, in <module>
    import pkg_resources
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3282, in <module>
    @_call_aside
     ^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3266, in _call_aside
    f(*args, **kwargs)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 3295, in _initialize_master_working_set
    working_set = _declare_state('object', 'working_set', WorkingSet._build_master())
                                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 589, in _build_master
    ws.require(__requires__)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 926, in require
    needed = self.resolve(parse_requirements(requirements))
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 787, in resolve
    dist = self._resolve_dist(
           ^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 816, in _resolve_dist
    env = Environment(self.entries)
          ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1014, in __init__
    self.scan(search_path)
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 1046, in scan
    for dist in find_distributions(item):
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2091, in find_on_path
    yield from factory(fullpath)
               ^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2183, in resolve_egg_link
    return next(dist_groups, ())
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2179, in <genexpr>
    resolved_paths = (
                     ^
  File "/home/synapse/env-python311/lib/python3.11/site-packages/pkg_resources/__init__.py", line 2167, in non_empty_lines
    for line in _read_utf8_with_fallback(path).splitlines():
                ^^^^^^^^^^^^^^^^^^^^^^^^
NameError: name '_read_utf8_with_fallback' is not defined
```
2024-07-18 12:59:53 +01:00
Eric Eastwood
a574de0062 Add m.room.create to default bump event types (#17453)
Add `m.room.create` to default bump event types

This probably helps when no messages have been sent in the room and it
was just created.
2024-07-18 12:49:53 +01:00
Eric Eastwood
3fee32ed6b Order heroes by stream_ordering (as spec'ed) (#17435)
The spec specifically mentions `stream_ordering`, but that's a Synapse-specific concept. In any case, the essence of the spec is to return the first 5 members of the room, which ordering by `stream_ordering` accomplishes.

Split off from https://github.com/element-hq/synapse/pull/17419#discussion_r1671342794

## Spec compliance

> This should be the first 5 members of the room, **ordered by stream ordering**, which are joined or invited. The list must never include the client’s own user ID. When no joined or invited members are available, this should consist of the banned and left users.
>
> *-- https://spec.matrix.org/v1.10/client-server-api/#_matrixclientv3sync_roomsummary*

Related to https://github.com/matrix-org/matrix-spec/issues/1334
2024-07-17 13:10:15 -05:00
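
A sketch of the selection rule from the spec excerpt above, assuming each member record carries its event's stream ordering (the fallback to banned/left members when nobody is joined or invited is omitted):

```python
# Sketch: the first five joined/invited members by stream ordering, never
# including the requesting user, per the spec excerpt quoted above.
def pick_heroes(members, own_user_id: str) -> list:
    eligible = [
        m for m in members
        if m.user_id != own_user_id and m.membership in ("join", "invite")
    ]
    eligible.sort(key=lambda m: m.event_stream_ordering)
    return eligible[:5]
```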
Till Faelligen
5884f0a956 Merge branch 'master' into develop 2024-07-16 14:33:58 +02:00
dependabot[bot]
79924aebef Bump mypy from 1.9.0 to 1.10.1 (#17445) 2024-07-16 12:08:06 +01:00
dependabot[bot]
83894180b2 Bump matrix-org/done-action from 2 to 3 (#17440) 2024-07-16 11:35:47 +01:00
Shay
429ecb7564 Handle remote download responses with UNKNOWN_LENGTH more gracefully (#17439)
Prior to this PR, remote downloads which did not provide a
`content-length` were decremented from the remote download ratelimiter
at the max allowable size, leading to excessive ratelimiting - see
https://github.com/element-hq/synapse/issues/17394.

This PR adds a linearizer to limit concurrent remote downloads to 6 per
IP address, and decrements remote downloads without a `content-length`
from the ratelimiter *after* the download is complete and the response
length is known.

Also adds logic to ensure that responses with a known length respect the
`max_download_size`.
2024-07-16 11:13:55 +01:00
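
A condensed sketch of the two mechanisms, using Synapse's real `Linearizer` helper but with the surrounding download logic simplified:

```python
from synapse.util.async_helpers import Linearizer

# At most 6 concurrent remote downloads per IP address.
remote_download_linearizer = Linearizer("remote_download_linearizer", 6)


async def download(ip_address: str, read_body, download_ratelimiter):
    async with remote_download_linearizer.queue(ip_address):
        length = await read_body()
    # The response had no content-length, so charge the ratelimiter the
    # *actual* size only once the download is complete.
    download_ratelimiter.record_action(
        requester=None, key=ip_address, n_actions=length
    )
    return length
```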
dependabot[bot]
9e1acea051 Bump setuptools from 67.6.0 to 70.0.0 (#17448) 2024-07-16 10:06:05 +01:00
Shay
899d33f2ba Remove unnecessary call to resume producing in fake channel (#17449)
This fell out of the authenticated media work - this bit of code masked
a bug but does not break anything when removed, so it should probably be
removed.
2024-07-16 09:52:39 +01:00
Erik Johnston
df11af14db Fix bug where sync could get stuck when using workers (#17438)
This is because we serialized the token incorrectly if the instance map
contained entries from before the minimum token.
2024-07-15 16:13:04 +01:00
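
A hypothetical sketch of the repaired invariant; the `name.pos` layout and `~` separator below are illustrative only and are not Synapse's actual token wire format:

```python
# Hypothetical sketch only: when serializing a multi-writer stream token,
# instance-map entries at or below the minimum token carry no information
# and must not be emitted.
def serialize_token(min_token: int, instance_map: dict) -> str:
    parts = [str(min_token)]
    parts += [
        f"{name}.{pos}"
        for name, pos in sorted(instance_map.items())
        if pos > min_token
    ]
    return "~".join(parts)
```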
dependabot[bot]
d88ba45db9 Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712 (#17446) 2024-07-15 13:58:28 +01:00
dependabot[bot]
14f2b1eb00 Bump bytes from 1.6.0 to 1.6.1 (#17441) 2024-07-15 13:58:12 +01:00
dependabot[bot]
2af729a193 Bump ulid from 1.1.2 to 1.1.3 (#17442) 2024-07-15 13:57:45 +01:00
dependabot[bot]
0de0689ae8 Bump jsonschema from 4.22.0 to 4.23.0 (#17444) 2024-07-15 13:57:30 +01:00
dependabot[bot]
4c44020838 Bump twine from 5.1.0 to 5.1.1 (#17443) 2024-07-15 13:57:13 +01:00
Quentin Gliech
4f6194492a Make sure we use the right logic for enabling the media repo. (#17424)
This removes the `enable_media_repo` attribute on the server config in
favour of always using `can_load_media_repo` in the media config.
This should avoid issues like #17420 in the future.
2024-07-15 11:42:59 +02:00
Eric Eastwood
ab62aa09da Add room subscriptions to Sliding Sync /sync (#17432)
Add room subscriptions to Sliding Sync `/sync`

Based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync

Currently, you can only subscribe to rooms you have had *any* membership
in before.

In the future, we will allow `world_readable` rooms to be subscribed to
without joining.
2024-07-15 10:37:10 +01:00
Eric Eastwood
fb66e938b2 Add is_dm room field to Sliding Sync /sync (#17429)
Based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync
2024-07-11 18:19:26 -05:00
Eric Eastwood
5a97bbd895 Add heroes and room summary fields to Sliding Sync /sync (#17419)
Additional room summary fields: `joined_count`, `invited_count`

Based on
[MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575):
Sliding Sync
2024-07-11 14:05:38 -05:00
Erik Johnston
606da398fc Fix filtering room types on remote rooms (#17434)
We can only fetch room types for rooms the server is in, so we need to
apply the room type filter only to rooms that we're joined to.

Also includes a perf fix to bulk fetch room types.
2024-07-11 16:00:44 +01:00
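
A sketch of the fix; `bulk_get_room_type` is an illustrative name for the batched helper:

```python
# Sketch: room types are only known for rooms the server is in, so apply the
# room-type filter to joined rooms only and let remote invites pass through.
async def filter_by_room_type(store, rooms, allowed_types: set):
    joined_ids = [r.room_id for r in rooms if r.is_joined]
    room_types = await store.bulk_get_room_type(joined_ids)  # one query, not N
    return [
        r for r in rooms
        if not r.is_joined or room_types.get(r.room_id) in allowed_types
    ]
```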
Travis Ralston
677142b6a9 Fix docs on record_action to clarify the actions are applied (#17426)
This looks like a copy/paste error: the function doesn't reject
anything, but instead allows the action count to go through regardless.
The remainder of the function's documentation appears correct.
2024-07-11 14:03:13 +01:00
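
For illustration, the corrected behaviour in use (per the docstring change further down, `record_action` applies the count unconditionally, and a negative `n_actions` "refunds" usage):

```python
# record_action never rejects anything; charging the actual length after a
# download finishes (and refunding with a negative n_actions if we
# over-charged earlier) relies on exactly that.
download_ratelimiter.record_action(
    requester=None, key=ip_address, n_actions=length
)
```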
villepeh
342f0c35b7 Add Red Hat Enterprise Linux and Rocky Linux installation instructions (#17423)
Added RHEL/Rocky install instructions (PyPI). Instructions cover
versions 8 and 9, which are the only supported ones, except for RHEL 7,
which is now in the extended life cycle support phase.

A large part of the guide covers installing Python 3.11 or 3.12. RHEL 8
ships with Python 3.6 and RHEL 9 ships with 3.9. Newer Python versions
can be installed easily, as they don't interfere with OS software that
still relies on the default Python version.

I was first planning to add the prerequisites to the prerequisites
section and the install instructions to the top of the page, but that
section is for pre-built packages, so it didn't feel right. Instead I
put everything in the PyPI section of the page; suggestions for changes
are welcome.

I also didn't combine these with the Fedora section: I haven't tested those
packages on RHEL, and Fedora ships with Python 3.12 out of the box.
2024-07-11 14:02:19 +01:00
Joe Groocock
5871daf877 Use consistent casing between FROM and AS (#17431)
Signed-off-by: Joe Groocock <me@frebib.net>
2024-07-11 13:56:25 +01:00
dependabot[bot]
30e14c8510 Bump zipp from 3.15.0 to 3.19.1 (#17427) 2024-07-11 11:54:21 +01:00
Will Lewis
3232bc2982 Upload new logo with white bg and update readme to use it (#17387) 2024-07-10 14:59:24 +01:00
Erik Johnston
4ca13ce0dd Handle to-device extensions to Sliding Sync (#17416)
Implements MSC3885

---------

Co-authored-by: Eric Eastwood <eric.eastwood@beta.gouv.fr>
2024-07-10 11:58:42 +01:00
Quentin Gliech
8e229535fa Merge branch 'release-v1.111' into develop 2024-07-10 11:36:07 +02:00
Eric Eastwood
1cf3ff6b40 Add rooms name and avatar to Sliding Sync /sync (#17418)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
2024-07-09 12:26:45 -05:00
68 changed files with 4385 additions and 683 deletions

View File

@@ -305,7 +305,7 @@ jobs:
- lint-readme
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
- uses: matrix-org/done-action@v3
with:
needs: ${{ toJSON(needs) }}
@@ -737,7 +737,7 @@ jobs:
- linting-done
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
- uses: matrix-org/done-action@v3
with:
needs: ${{ toJSON(needs) }}

8
Cargo.lock generated
View File

@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytes"
version = "1.6.0"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952"
[[package]]
name = "cfg-if"
@@ -597,9 +597,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "ulid"
version = "1.1.2"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
dependencies = [
"getrandom",
"rand",

View File

@@ -1,4 +1,4 @@
.. image:: https://github.com/element-hq/product/assets/87339233/7abf477a-5277-47f3-be44-ea44917d8ed7
.. image:: ./docs/element_logo_white_bg.svg
:height: 60px
**Element Synapse - Matrix homeserver implementation**

1
changelog.d/17387.doc Normal file
View File

@@ -0,0 +1 @@
Update the readme image to have a white background, so that it is readable in dark mode.

View File

@@ -0,0 +1 @@
Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View File

@@ -0,0 +1 @@
Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View File

@@ -0,0 +1 @@
Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

1
changelog.d/17423.doc Normal file
View File

@@ -0,0 +1 @@
Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions.

1
changelog.d/17424.misc Normal file
View File

@@ -0,0 +1 @@
Make sure we always use the right logic for enabling the media repo.

1
changelog.d/17426.misc Normal file
View File

@@ -0,0 +1 @@
Fix documentation on `RateLimiter#record_action`.

View File

@@ -0,0 +1 @@
Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View File

@@ -0,0 +1 @@
Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View File

@@ -0,0 +1 @@
Prepare for authenticated media freeze.

1
changelog.d/17434.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites.

1
changelog.d/17435.bugfix Normal file
View File

@@ -0,0 +1 @@
Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`).

1
changelog.d/17438.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers.

1
changelog.d/17439.bugfix Normal file
View File

@@ -0,0 +1 @@
Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete.

1
changelog.d/17449.bugfix Normal file
View File

@@ -0,0 +1 @@
Remove unnecessary call to resume producing in fake channel.

1
changelog.d/17451.doc Normal file
View File

@@ -0,0 +1 @@
Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option.

1
changelog.d/17453.misc Normal file
View File

@@ -0,0 +1 @@
Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created.

1
changelog.d/17458.misc Normal file
View File

@@ -0,0 +1 @@
Speed up generating sliding sync responses.

1
changelog.d/17460.misc Normal file
View File

@@ -0,0 +1 @@
Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync.

1
changelog.d/17468.misc Normal file
View File

@@ -0,0 +1 @@
Speed up sorting of the room list in sliding sync.

View File

@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
###
# We hardcode the use of Debian bookworm here because this could change upstream
# and other Dockerfiles used for testing are expecting bookworm.
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder
# install the OS build deps
RUN \

View File

@@ -24,7 +24,7 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)
FROM docker.io/library/${distro} as builder
FROM docker.io/library/${distro} AS builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \

View File

@@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="41.440346mm"
height="10.383124mm"
viewBox="0 0 41.440346 10.383125"
version="1.1"
id="svg1"
xml:space="preserve"
sodipodi:docname="element_logo_white_bg.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
id="namedview1"
pagecolor="#ffffff"
bordercolor="#000000"
borderopacity="0.25"
inkscape:showpageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1"
inkscape:document-units="mm"
showgrid="false"
inkscape:export-bgcolor="#ffffffff" /><defs
id="defs1" /><g
id="layer1"
transform="translate(-84.803844,-143.2075)"
inkscape:export-filename="element_logo_white_bg.svg"
inkscape:export-xdpi="96"
inkscape:export-ydpi="96"><g
style="fill:none"
id="g1"
transform="matrix(0.26458333,0,0,0.26458333,85.841658,144.26667)"><rect
style="display:inline;fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:1.31041;stroke-dasharray:none;stroke-opacity:1"
id="rect20"
width="155.31451"
height="37.932892"
x="-3.2672384"
y="-3.3479743"
rx="3.3718522"
ry="3.7915266"
transform="translate(-2.1259843e-6)"
inkscape:label="rect20"
inkscape:export-filename="rect20.svg"
inkscape:export-xdpi="96"
inkscape:export-ydpi="96" /><path
fill-rule="evenodd"
clip-rule="evenodd"
d="M 16,32 C 24.8366,32 32,24.8366 32,16 32,7.16344 24.8366,0 16,0 7.16344,0 0,7.16344 0,16 0,24.8366 7.16344,32 16,32 Z"
fill="#0dbd8b"
id="path1" /><path
fill-rule="evenodd"
clip-rule="evenodd"
d="m 13.0756,7.455 c 0,-0.64584 0.5247,-1.1694 1.1719,-1.1694 4.3864,0 7.9423,3.54853 7.9423,7.9259 0,0.6458 -0.5246,1.1694 -1.1718,1.1694 -0.6472,0 -1.1719,-0.5236 -1.1719,-1.1694 0,-3.0857 -2.5066,-5.58711 -5.5986,-5.58711 -0.6472,0 -1.1719,-0.52355 -1.1719,-1.16939 z"
fill="#ffffff"
id="path2" /><path
fill-rule="evenodd"
clip-rule="evenodd"
d="m 24.5424,13.042 c 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,4.3773 -3.5559,7.9258 -7.9424,7.9258 -0.6472,0 -1.1718,-0.5235 -1.1718,-1.1693 0,-0.6459 0.5246,-1.1694 1.1718,-1.1694 3.0921,0 5.5987,-2.5015 5.5987,-5.5871 0,-0.6459 0.5247,-1.1694 1.1718,-1.1694 z"
fill="#ffffff"
id="path3" /><path
fill-rule="evenodd"
clip-rule="evenodd"
d="m 18.9446,24.5446 c 0,0.6459 -0.5247,1.1694 -1.1718,1.1694 -4.3865,0 -7.94239,-3.5485 -7.94239,-7.9258 0,-0.6459 0.52469,-1.1694 1.17179,-1.1694 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,3.0856 2.5066,5.587 5.5987,5.587 0.6471,0 1.1718,0.5236 1.1718,1.1694 z"
fill="#ffffff"
id="path4" /><path
fill-rule="evenodd"
clip-rule="evenodd"
d="m 7.45823,18.9576 c -0.64718,0 -1.17183,-0.5235 -1.17183,-1.1694 0,-4.3773 3.55591,-7.92581 7.9423,-7.92581 0.6472,0 1.1719,0.52351 1.1719,1.16941 0,0.6458 -0.5247,1.1694 -1.1719,1.1694 -3.092,0 -5.59864,2.5014 -5.59864,5.587 0,0.6459 -0.52465,1.1694 -1.17183,1.1694 z"
fill="#ffffff"
id="path5" /><path
d="M 56.2856,18.1428 H 44.9998 c 0.1334,1.181 0.5619,2.1238 1.2858,2.8286 0.7238,0.6857 1.6761,1.0286 2.8571,1.0286 0.7809,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3428,-1.5428 h 3.4286 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1714,0 3.9238,0.73333 5.2572,2.20003 1.3523,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0667,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6858,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
fill="#000000"
id="path6" /><path
d="M 58.6539,20.1428 V 3.14282 h 3.4 V 20.2 c 0,0.7619 0.419,1.1428 1.2571,1.1428 l 0.6,-0.0285 v 3.2285 c -0.3238,0.0572 -0.6667,0.0857 -1.0286,0.0857 -1.4666,0 -2.5428,-0.3714 -3.2285,-1.1142 -0.6667,-0.7429 -1,-1.8667 -1,-3.3715 z"
fill="#000000"
id="path7" /><path
d="M 79.7454,18.1428 H 68.4597 c 0.1333,1.181 0.5619,2.1238 1.2857,2.8286 0.7238,0.6857 1.6762,1.0286 2.8571,1.0286 0.781,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3429,-1.5428 h 3.4285 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1715,0 3.9238,0.73333 5.2572,2.20003 1.3524,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0666,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6857,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
fill="#000000"
id="path8" /><path
d="m 95.0851,16.0571 v 8.5143 h -3.4 v -8.8857 c 0,-2.2476 -0.9333,-3.3714 -2.8,-3.3714 -1.0095,0 -1.819,0.3238 -2.4286,0.9714 -0.5904,0.6476 -0.8857,1.5333 -0.8857,2.6571 v 8.6286 h -3.4 V 9.74282 h 3.1429 v 1.97148 c 0.3619,-0.6667 0.9143,-1.2191 1.6571,-1.6572 0.7429,-0.43809 1.6667,-0.65713 2.7714,-0.65713 2.0572,0 3.5429,0.78093 4.4572,2.34283 1.2571,-1.5619 2.9333,-2.34283 5.0286,-2.34283 1.733,0 3.067,0.54285 4,1.62853 0.933,1.0667 1.4,2.4762 1.4,4.2286 v 9.3143 h -3.4 v -8.8857 c 0,-2.2476 -0.933,-3.3714 -2.8,-3.3714 -1.0286,0 -1.8477,0.3333 -2.4572,1 -0.5905,0.6476 -0.8857,1.5619 -0.8857,2.7428 z"
fill="#000000"
id="path9" /><path
d="m 121.537,18.1428 h -11.286 c 0.133,1.181 0.562,2.1238 1.286,2.8286 0.723,0.6857 1.676,1.0286 2.857,1.0286 0.781,0 1.486,-0.1905 2.114,-0.5715 0.629,-0.3809 1.076,-0.8952 1.343,-1.5428 h 3.429 c -0.458,1.5047 -1.315,2.7238 -2.572,3.6571 -1.238,0.9143 -2.705,1.3715 -4.4,1.3715 -2.209,0 -4,-0.7334 -5.371,-2.2 -1.353,-1.4667 -2.029,-3.3239 -2.029,-5.5715 0,-2.1905 0.686,-4.0285 2.057,-5.5143 1.372,-1.4857 3.143,-2.22853 5.315,-2.22853 2.171,0 3.923,0.73333 5.257,2.20003 1.352,1.4476 2.028,3.2762 2.028,5.4857 z m -7.257,-5.9714 c -1.067,0 -1.953,0.3143 -2.658,0.9429 -0.704,0.6285 -1.142,1.4666 -1.314,2.5142 h 7.886 c -0.153,-1.0476 -0.572,-1.8857 -1.257,-2.5142 -0.686,-0.6286 -1.572,-0.9429 -2.657,-0.9429 z"
fill="#000000"
id="path10" /><path
d="m 127.105,9.74282 v 1.97148 c 0.343,-0.6477 0.905,-1.1905 1.686,-1.6286 0.8,-0.45716 1.762,-0.68573 2.885,-0.68573 1.753,0 3.105,0.53333 4.058,1.60003 0.971,1.0666 1.457,2.4857 1.457,4.2571 v 9.3143 h -3.4 v -8.8857 c 0,-1.0476 -0.248,-1.8667 -0.743,-2.4572 -0.476,-0.6095 -1.21,-0.9142 -2.2,-0.9142 -1.086,0 -1.943,0.3238 -2.572,0.9714 -0.609,0.6476 -0.914,1.5428 -0.914,2.6857 v 8.6 h -3.4 V 9.74282 Z"
fill="#000000"
id="path11" /><path
d="m 147.12,21.5428 v 2.9429 c -0.419,0.1143 -1.009,0.1714 -1.771,0.1714 -2.895,0 -4.343,-1.4571 -4.343,-4.3714 v -7.8286 h -2.257 V 9.74282 h 2.257 V 5.88568 h 3.4 v 3.85714 h 2.772 v 2.71428 h -2.772 v 7.4857 c 0,1.1619 0.552,1.7429 1.657,1.7429 z"
fill="#000000"
id="path12" /></g></g></svg>


View File

@@ -67,7 +67,7 @@ in Synapse can be deactivated.
**NOTE**: This has an impact on security and is for testing purposes only!
To deactivate the certificate validation, the following setting must be added to
your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).
your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).
```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true

View File

@@ -309,7 +309,62 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
python3-virtualenv libffi-devel openssl-devel python3-devel \
libicu-devel
sudo dnf groupinstall "Development Tools"
sudo dnf group install "Development Tools"
```
##### Red Hat Enterprise Linux / Rocky Linux
*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*
It's recommended to use the latest Python versions.
RHEL 8 in particular ships with Python 3.6 by default, which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9, which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and are available in the official distribution repositories, so it's recommended to use them.
Python 3.11 and 3.12 are available for both RHEL 8 and 9.
These commands should be run as the root user.
RHEL 8
```bash
# Enable PowerTools repository
dnf config-manager --set-enabled powertools
```
RHEL 9
```bash
# Enable CodeReady Linux Builder repository
crb enable
```
Install a newer version of Python. You only need one of these:
```bash
# Python 3.11
dnf install python3.11 python3.11-devel
```
```bash
# Python 3.12
dnf install python3.12 python3.12-devel
```
Finally, install the common prerequisites:
```bash
dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
dnf group install "Development Tools"
```
###### Using venv module instead of virtualenv command
It's recommended to use the Python venv module directly rather than the virtualenv command.
* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.
Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI.
```bash
mkdir -p ~/synapse
# To use Python 3.11, simply use the command "python3.11" instead.
python3.12 -m venv ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```
##### macOS

View File

@@ -1863,6 +1863,18 @@ federation_rr_transactions_per_room_per_second: 40
## Media Store
Config options related to Synapse's media store.
---
### `enable_authenticated_media`
When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over the legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note that if the setting is switched back to false
after enabling, media marked as authenticated will again be available over the legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
Example configuration:
```yaml
enable_authenticated_media: true
```
---
### `enable_media_repo`
@@ -4134,6 +4146,38 @@ default_power_level_content_override:
trusted_private_chat: null
public_chat: null
```
The default power levels for each preset are:
```yaml
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
```
So a complete example that keeps the default power levels for a preset
while also setting the power level for a new key is:
```yaml
default_power_level_content_override:
private_chat:
events:
"com.example.foo": 0
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
trusted_private_chat: null
public_chat: null
```
---
### `forget_rooms_on_leave`

100
poetry.lock generated
View File

@@ -934,13 +934,13 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
version = "4.22.0"
version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
{file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
]
[package.dependencies]
@@ -953,7 +953,7 @@ rpds-py = ">=0.7.1"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
[[package]]
name = "jsonschema-specifications"
@@ -1389,38 +1389,38 @@ files = [
[[package]]
name = "mypy"
version = "1.9.0"
version = "1.10.1"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
{file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
{file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
{file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
{file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
{file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
{file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
{file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
{file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
{file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
{file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
{file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
{file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
{file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
{file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
{file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
{file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
{file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
{file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
{file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
{file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
{file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
{file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
{file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
{file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
{file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
{file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
{file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"},
{file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"},
{file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"},
{file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"},
{file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"},
{file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"},
{file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"},
{file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"},
{file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"},
{file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"},
{file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"},
{file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"},
{file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"},
{file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"},
{file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"},
{file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"},
{file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"},
{file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"},
{file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"},
{file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"},
{file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"},
{file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"},
{file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"},
{file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"},
{file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"},
{file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"},
{file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"},
]
[package.dependencies]
@@ -2430,13 +2430,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.6.0"
version = "2.8.0"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
{file = "sentry_sdk-2.6.0-py2.py3-none-any.whl", hash = "sha256:422b91cb49378b97e7e8d0e8d5a1069df23689d45262b86f54988a7db264e874"},
{file = "sentry_sdk-2.6.0.tar.gz", hash = "sha256:65cc07e9c6995c5e316109f138570b32da3bd7ff8d0d0ee4aaf2628c3dd8127d"},
{file = "sentry_sdk-2.8.0-py2.py3-none-any.whl", hash = "sha256:6051562d2cfa8087bb8b4b8b79dc44690f8a054762a29c07e22588b1f619bfb5"},
{file = "sentry_sdk-2.8.0.tar.gz", hash = "sha256:aa4314f877d9cd9add5a0c9ba18e3f27f99f7de835ce36bd150e48a41c7c646f"},
]
[package.dependencies]
@@ -2466,7 +2466,7 @@ langchain = ["langchain (>=0.0.210)"]
loguru = ["loguru (>=0.5)"]
openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging (==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
@@ -2476,7 +2476,7 @@ sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
starlette = ["starlette (>=0.19.1)"]
starlite = ["starlite (>=1.48)"]
tornado = ["tornado (>=5)"]
tornado = ["tornado (>=6)"]
[[package]]
name = "service-identity"
@@ -2704,19 +2704,19 @@ docs = ["sphinx (<7.0.0)"]
[[package]]
name = "twine"
version = "5.1.0"
version = "5.1.1"
description = "Collection of utilities for publishing packages on PyPI"
optional = false
python-versions = ">=3.8"
files = [
{file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"},
{file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"},
{file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"},
{file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"},
]
[package.dependencies]
importlib-metadata = ">=3.6"
keyring = ">=15.1"
pkginfo = ">=1.8.1"
pkginfo = ">=1.8.1,<1.11"
readme-renderer = ">=35.0"
requests = ">=2.20"
requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
@@ -2851,13 +2851,13 @@ files = [
[[package]]
name = "types-jsonschema"
version = "4.22.0.20240610"
version = "4.23.0.20240712"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"},
{file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"},
{file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"},
{file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = "sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"},
]
[package.dependencies]
@@ -3113,18 +3113,18 @@ docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"]
[[package]]
name = "zipp"
version = "3.15.0"
version = "3.19.1"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
{file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
{file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
{file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "zope-event"

View File

@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
"e2e_room_keys": ["is_verified"],
"event_edges": ["is_state"],
"events": ["processed", "outlier", "contains_url"],
"local_media_repository": ["safe_from_quarantine"],
"local_media_repository": ["safe_from_quarantine", "authenticated"],
"per_user_experimental_features": ["enabled"],
"presence_list": ["accepted"],
"presence_stream": ["currently_active"],
"public_room_list_stream": ["visibility"],
"pushers": ["enabled"],
"redactions": ["have_censored"],
"remote_media_cache": ["authenticated"],
"room_stats_state": ["is_federatable"],
"rooms": ["is_public", "has_auth_chain_index"],
"users": ["shadow_banned", "approved", "locked", "suspended"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],
"per_user_experimental_features": ["enabled"],
}

View File

@@ -50,7 +50,7 @@ class Membership:
KNOCK: Final = "knock"
LEAVE: Final = "leave"
BAN: Final = "ban"
LIST: Final = {INVITE, JOIN, KNOCK, LEAVE, BAN}
LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN))
class PresenceState:
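
A quick illustration of what the `frozenset` buys: immutability plus hashability, which matters for the cached lookups added elsewhere in this comparison.

```python
# Sketch: a frozenset constant cannot be mutated at runtime and is hashable,
# so it can be passed to cached functions or used as a dict key.
LIST = frozenset(("invite", "join", "knock", "leave", "ban"))

assert "join" in LIST   # membership tests work exactly as with a set
hash(LIST)              # hashable, unlike a plain set or list
# LIST.add("foo")       # would raise AttributeError: frozenset is immutable
```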

View File

@@ -236,9 +236,8 @@ class Ratelimiter:
requester: The requester that is doing the action, if any.
key: An arbitrary key used to classify an action. Defaults to the
requester's user ID.
n_actions: The number of times the user wants to do this action. If the user
cannot do all of the actions, the user's action count is not incremented
at all.
n_actions: The number of times the user performed the action. May be negative
to "refund" the rate limit.
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.
"""

View File

@@ -217,7 +217,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["media", "federation", "client"]:
if self.config.server.enable_media_repo:
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{

View File

@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
self.root.server.enable_media_repo is False
config.get("enable_media_repo", True) is False
and config.get("worker_app") != "synapse.app.media_repository"
):
self.can_load_media_repo = False
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)
self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store")

View File

@@ -395,12 +395,6 @@ class ServerConfig(Config):
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))
# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(

View File

@@ -1188,6 +1188,8 @@ class RoomCreationHandler:
)
events_to_send.append((power_event, power_context))
else:
# Please update the docs for `default_power_level_content_override` when
# updating the `events` dict below
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users_default": 0,

File diff suppressed because it is too large

View File

@@ -90,7 +90,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred
from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
from synapse.util.metrics import Measure
from synapse.util.stringutils import parse_and_validate_server_name
@@ -475,6 +475,8 @@ class MatrixFederationHttpClient:
use_proxy=True,
)
self.remote_download_linearizer = Linearizer("remote_download_linearizer", 6)
def wake_destination(self, destination: str) -> None:
"""Called when the remote server may have come back online."""
@@ -1486,35 +1488,44 @@ class MatrixFederationHttpClient:
)
headers = dict(response.headers.getAllRawHeaders())
expected_size = response.length
# if we don't get an expected length then use the max length
if expected_size == UNKNOWN_LENGTH:
expected_size = max_size
logger.debug(
f"File size unknown, assuming file is max allowable size: {max_size}"
)
else:
if int(expected_size) > max_size:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(
HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
)
try:
# add a byte of headroom to max size as function errs at >=
d = read_body_with_max_size(response, output_stream, expected_size + 1)
d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
async with self.remote_download_linearizer.queue(ip_address):
# add a byte of headroom to max size as function errs at >=
d = read_body_with_max_size(response, output_stream, expected_size + 1)
d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (expected_size,)
logger.warning(
@@ -1560,6 +1571,13 @@ class MatrixFederationHttpClient:
request.method,
request.uri.decode("ascii"),
)
# if we didn't know the length upfront, decrement the actual size from ratelimiter
if response.length == UNKNOWN_LENGTH:
download_ratelimiter.record_action(
requester=None, key=ip_address, n_actions=length
)
return length, headers
async def federation_get_file(
@@ -1630,29 +1648,37 @@ class MatrixFederationHttpClient:
)
headers = dict(response.headers.getAllRawHeaders())
expected_size = response.length
# if we don't get an expected length then use the max length
if expected_size == UNKNOWN_LENGTH:
expected_size = max_size
logger.debug(
f"File size unknown, assuming file is max allowable size: {max_size}"
)
else:
if int(expected_size) > max_size:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
read_body, _ = await download_ratelimiter.can_do_action(
requester=None,
key=ip_address,
n_actions=expected_size,
)
raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
if not read_body:
msg = "Requested file size exceeds ratelimits"
logger.warning(
"{%s} [%s] %s",
request.txn_id,
request.destination,
msg,
)
raise SynapseError(
HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED
)
# this should be a multipart/mixed response with the boundary string in the header
try:
@@ -1672,11 +1698,12 @@ class MatrixFederationHttpClient:
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
try:
# add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
deferred = read_multipart_response(
response, output_stream, boundary, expected_size + 1
)
deferred.addTimeout(self.default_timeout_seconds, self.reactor)
async with self.remote_download_linearizer.queue(ip_address):
# add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
deferred = read_multipart_response(
response, output_stream, boundary, expected_size + 1
)
deferred.addTimeout(self.default_timeout_seconds, self.reactor)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (expected_size,)
logger.warning(
@@ -1743,6 +1770,13 @@ class MatrixFederationHttpClient:
request.method,
request.uri.decode("ascii"),
)
# if we didn't know the length upfront, decrement the actual size from ratelimiter
if response.length == UNKNOWN_LENGTH:
download_ratelimiter.record_action(
requester=None, key=ip_address, n_actions=length
)
return length, headers, multipart_response.json

View File

@@ -430,6 +430,7 @@ class MediaRepository:
media_id: str,
name: Optional[str],
max_timeout_ms: int,
allow_authenticated: bool = True,
federation: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -442,6 +443,7 @@ class MediaRepository:
the filename in the Content-Disposition header of the response.
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
allow_authenticated: whether media marked as authenticated may be served to this request
federation: whether the local media being fetched is for a federation request
Returns:
@@ -451,6 +453,10 @@ class MediaRepository:
if not media_info:
return
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()
self.mark_recently_accessed(None, media_id)
media_type = media_info.media_type
@@ -481,6 +487,7 @@ class MediaRepository:
max_timeout_ms: int,
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool = True,
) -> None:
"""Respond to requests for remote media.
@@ -495,6 +502,8 @@ class MediaRepository:
ip_address: the IP address of the requester
use_federation_endpoint: whether to request the remote media over the new
federation `/download` endpoint
allow_authenticated: whether media marked as authenticated may be served to this
request
Returns:
Resolves once a response has successfully been written to request
@@ -526,6 +535,7 @@ class MediaRepository:
self.download_ratelimiter,
ip_address,
use_federation_endpoint,
allow_authenticated,
)
# We deliberately stream the file outside the lock
@@ -548,6 +558,7 @@ class MediaRepository:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool,
) -> RemoteMedia:
"""Gets the media info associated with the remote file, downloading
if necessary.
@@ -560,6 +571,8 @@ class MediaRepository:
ip_address: IP address of the requester
use_federation: if a download is necessary, whether to request the remote file
over the federation `/download` endpoint
allow_authenticated: whether media marked as authenticated may be served to this
request
Returns:
The media info of the file
@@ -581,6 +594,7 @@ class MediaRepository:
self.download_ratelimiter,
ip_address,
use_federation,
allow_authenticated,
)
# Ensure we actually use the responder so that it releases resources
@@ -598,6 +612,7 @@ class MediaRepository:
download_ratelimiter: Ratelimiter,
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool,
) -> Tuple[Optional[Responder], RemoteMedia]:
"""Looks for media in local cache, if not there then attempt to
download from remote server.
@@ -619,6 +634,11 @@ class MediaRepository:
"""
media_info = await self.store.get_cached_remote_media(server_name, media_id)
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
# if it isn't cached then don't fetch it, and if it is authenticated then don't serve it
if not media_info or media_info.authenticated:
raise NotFoundError()
# file_id is the ID we use to track the file locally. If we've already
# seen the file then reuse the existing ID, otherwise generate a new
# one.
@@ -792,6 +812,11 @@ class MediaRepository:
logger.info("Stored remote media in file %r", fname)
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False
return RemoteMedia(
media_origin=server_name,
media_id=media_id,
@@ -802,6 +827,7 @@ class MediaRepository:
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
)
async def _federation_download_remote_file(
@@ -915,6 +941,11 @@ class MediaRepository:
logger.debug("Stored remote media in file %r", fname)
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False
return RemoteMedia(
media_origin=server_name,
media_id=media_id,
@@ -925,6 +956,7 @@ class MediaRepository:
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
authenticated=authenticated,
)
def _get_thumbnail_requirements(
@@ -1030,7 +1062,12 @@ class MediaRepository:
t_len = os.path.getsize(output_path)
await self.store.store_local_thumbnail(
media_id, t_width, t_height, t_type, t_method, t_len
media_id,
t_width,
t_height,
t_type,
t_method,
t_len,
)
return output_path
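
The check threaded through this file is deliberately uniform: when the `enable_authenticated_media` flag is on and the endpoint was reached without authentication, media stored as authenticated is reported as missing. A hedged sketch of the gate in isolation (`MediaInfo` and `NotFound` are stand-ins for the real types; one reading of the 404 choice is that it avoids confirming the media ID exists):

```python
from dataclasses import dataclass


class NotFound(Exception):
    """Stand-in for synapse.api.errors.NotFoundError (renders as a 404)."""


@dataclass
class MediaInfo:
    media_id: str
    authenticated: bool


def gate_authenticated_media(
    media: MediaInfo,
    enable_authenticated_media: bool,
    allow_authenticated: bool,
) -> MediaInfo:
    # Once the config flag is on, media stored as "authenticated" must not
    # be served over an endpoint reached without authentication. A 404
    # (rather than a 403) also avoids confirming that the media ID exists.
    if enable_authenticated_media and not allow_authenticated:
        if media.authenticated:
            raise NotFound()
    return media
```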


@@ -26,7 +26,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Type
from PIL import Image
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error
from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
from synapse.http.server import respond_with_json
from synapse.http.site import SynapseRequest
@@ -274,6 +274,7 @@ class ThumbnailProvider:
m_type: str,
max_timeout_ms: int,
for_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms
@@ -281,6 +282,12 @@ class ThumbnailProvider:
if not media_info:
return
# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
await self._select_and_respond_with_thumbnail(
request,
@@ -307,14 +314,20 @@ class ThumbnailProvider:
desired_type: str,
max_timeout_ms: int,
for_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_local_media_info(
request, media_id, max_timeout_ms
)
if not media_info:
return
# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()
thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
for info in thumbnail_infos:
t_w = info.width == desired_width
@@ -381,14 +394,27 @@ class ThumbnailProvider:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool = True,
) -> None:
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address, use_federation
server_name,
media_id,
max_timeout_ms,
ip_address,
use_federation,
allow_authenticated,
)
if not media_info:
respond_404(request)
return
# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
respond_404(request)
return
thumbnail_infos = await self.store.get_remote_media_thumbnails(
server_name, media_id
)
@@ -446,16 +472,28 @@ class ThumbnailProvider:
max_timeout_ms: int,
ip_address: str,
use_federation: bool,
allow_authenticated: bool = True,
) -> None:
# TODO: Don't download the whole remote file
# We should proxy the thumbnail from the remote server instead of
# downloading the remote file and generating our own thumbnails.
media_info = await self.media_repo.get_remote_media_info(
server_name, media_id, max_timeout_ms, ip_address, use_federation
server_name,
media_id,
max_timeout_ms,
ip_address,
use_federation,
allow_authenticated,
)
if not media_info:
return
# if the media the thumbnail is generated from is authenticated, don't serve the
# thumbnail over an unauthenticated endpoint
if self.hs.config.media.enable_authenticated_media and not allow_authenticated:
if media_info.authenticated:
raise NotFoundError()
thumbnail_infos = await self.store.get_remote_media_thumbnails(
server_name, media_id
)
@@ -485,8 +523,8 @@ class ThumbnailProvider:
file_id: str,
url_cache: bool,
for_federation: bool,
server_name: Optional[str] = None,
media_info: Optional[LocalMedia] = None,
server_name: Optional[str] = None,
) -> None:
"""
Respond to a request with an appropriate thumbnail from the previously generated thumbnails.


@@ -942,7 +942,9 @@ class SlidingSyncRestServlet(RestServlet):
response["rooms"] = await self.encode_rooms(
requester, sliding_sync_result.rooms
)
response["extensions"] = {} # TODO: sliding_sync_result.extensions
response["extensions"] = await self.encode_extensions(
requester, sliding_sync_result.extensions
)
return response
@@ -995,8 +997,21 @@ class SlidingSyncRestServlet(RestServlet):
if room_result.avatar:
serialized_rooms[room_id]["avatar"] = room_result.avatar
if room_result.heroes:
serialized_rooms[room_id]["heroes"] = room_result.heroes
if room_result.heroes is not None and len(room_result.heroes) > 0:
serialized_heroes = []
for hero in room_result.heroes:
serialized_hero = {
"user_id": hero.user_id,
}
if hero.display_name is not None:
# Not a typo, just how "displayname" is spelled in the spec
serialized_hero["displayname"] = hero.display_name
if hero.avatar_url is not None:
serialized_hero["avatar_url"] = hero.avatar_url
serialized_heroes.append(serialized_hero)
serialized_rooms[room_id]["heroes"] = serialized_heroes
# We should only include the `initial` key if it's `True` to save bandwidth.
# The absence of this flag means `False`.
@@ -1004,7 +1019,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["initial"] = room_result.initial
# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.required_state is not None:
if (
room_result.required_state is not None
and len(room_result.required_state) > 0
):
serialized_required_state = (
await self.event_serializer.serialize_events(
room_result.required_state,
@@ -1015,7 +1033,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["required_state"] = serialized_required_state
# This will be omitted for invite/knock rooms with `stripped_state`
if room_result.timeline_events is not None:
if (
room_result.timeline_events is not None
and len(room_result.timeline_events) > 0
):
serialized_timeline = await self.event_serializer.serialize_events(
room_result.timeline_events,
time_now,
@@ -1043,7 +1064,10 @@ class SlidingSyncRestServlet(RestServlet):
serialized_rooms[room_id]["is_dm"] = room_result.is_dm
# Stripped state only applies to invite/knock rooms
if room_result.stripped_state is not None:
if (
room_result.stripped_state is not None
and len(room_result.stripped_state) > 0
):
# TODO: `knocked_state` but that isn't specced yet.
#
# TODO: Instead of adding `knocked_state`, it would be good to rename
@@ -1054,6 +1078,19 @@ class SlidingSyncRestServlet(RestServlet):
return serialized_rooms
async def encode_extensions(
self, requester: Requester, extensions: SlidingSyncResult.Extensions
) -> JsonDict:
result = {}
if extensions.to_device is not None:
result["to_device"] = {
"next_batch": extensions.to_device.next_batch,
"events": extensions.to_device.events,
}
return result
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SyncRestServlet(hs).register(http_server)
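
For reference, each entry the hero loop above produces is a small dict rather than a serialized membership event; a sketch of the expected shape, with invented values:

```python
# One entry of a room's "heroes" list, as built by the loop above.
# Note the spec spelling "displayname" (no underscore) alongside the
# snake_cased "avatar_url"; optional fields are omitted when unset.
serialized_hero = {
    "user_id": "@alice:example.org",        # always present
    "displayname": "Alice",                 # only if a display name is set
    "avatar_url": "mxc://example.org/abc",  # only if an avatar is set
}
```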


@@ -84,7 +84,7 @@ class DownloadResource(RestServlet):
if self._is_mine_server_name(server_name):
await self.media_repo.get_local_media(
request, media_id, file_name, max_timeout_ms
request, media_id, file_name, max_timeout_ms, allow_authenticated=False
)
else:
allow_remote = parse_boolean(request, "allow_remote", default=True)
@@ -106,4 +106,5 @@ class DownloadResource(RestServlet):
max_timeout_ms,
ip_address,
False,
allow_authenticated=False,
)


@@ -96,6 +96,7 @@ class ThumbnailResource(RestServlet):
m_type,
max_timeout_ms,
False,
allow_authenticated=False,
)
else:
await self.thumbnail_provider.respond_local_thumbnail(
@@ -107,6 +108,7 @@ class ThumbnailResource(RestServlet):
m_type,
max_timeout_ms,
False,
allow_authenticated=False,
)
self.media_repo.mark_recently_accessed(None, media_id)
else:
@@ -134,6 +136,7 @@ class ThumbnailResource(RestServlet):
m_type,
max_timeout_ms,
ip_address,
False,
use_federation=False,
allow_authenticated=False,
)
self.media_repo.mark_recently_accessed(server_name, media_id)


@@ -120,6 +120,9 @@ class SQLBaseStore(metaclass=ABCMeta):
"get_user_in_room_with_profile", (room_id, user_id)
)
self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,))
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", (user_id,)
)
# Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
@@ -146,6 +149,9 @@ class SQLBaseStore(metaclass=ABCMeta):
self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", None
)
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
def _attempt_to_invalidate_cache(


@@ -331,6 +331,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
"get_invited_rooms_for_local_user", (state_key,)
)
self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", (state_key,)
)
self._attempt_to_invalidate_cache(
"did_forget",
@@ -393,6 +396,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None)
self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None)
self._attempt_to_invalidate_cache("get_rooms_for_user", None)
self._attempt_to_invalidate_cache(
"_get_rooms_for_local_user_where_membership_is_inner", None
)
self._attempt_to_invalidate_cache("did_forget", None)
self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
self._attempt_to_invalidate_cache("get_references_for_event", None)


@@ -1313,6 +1313,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# We want to make the cache more effective, so we clamp to the last
# change before the given ordering.
last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) # type: ignore[attr-defined]
if last_change is None:
# If the room isn't in the cache we know that the last change was
# somewhere before the earliest known position of the cache, so we
# can clamp to that.
last_change = self._events_stream_cache.get_earliest_known_position() # type: ignore[attr-defined]
# We don't always have a full stream_to_exterm_id table, e.g. after
# the upgrade that introduced it, so we make sure we never ask for a


@@ -64,6 +64,7 @@ class LocalMedia:
quarantined_by: Optional[str]
safe_from_quarantine: bool
user_id: Optional[str]
authenticated: Optional[bool]
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -77,6 +78,7 @@ class RemoteMedia:
created_ts: int
last_access_ts: int
quarantined_by: Optional[str]
authenticated: Optional[bool]
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -218,6 +220,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"last_access_ts",
"safe_from_quarantine",
"user_id",
"authenticated",
),
allow_none=True,
desc="get_local_media",
@@ -235,6 +238,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts=row[6],
safe_from_quarantine=row[7],
user_id=row[8],
authenticated=row[9],
)
async def get_local_media_by_user_paginate(
@@ -290,7 +294,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts,
quarantined_by,
safe_from_quarantine,
user_id
user_id,
authenticated
FROM local_media_repository
WHERE user_id = ?
ORDER BY {order_by_column} {order}, media_id ASC
@@ -314,6 +319,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
quarantined_by=row[7],
safe_from_quarantine=bool(row[8]),
user_id=row[9],
authenticated=row[10],
)
for row in txn
]
@@ -417,12 +423,18 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
time_now_ms: int,
user_id: UserID,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False
await self.db_pool.simple_insert(
"local_media_repository",
{
"media_id": media_id,
"created_ts": time_now_ms,
"user_id": user_id.to_string(),
"authenticated": authenticated,
},
desc="store_local_media_id",
)
@@ -438,6 +450,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
user_id: UserID,
url_cache: Optional[str] = None,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False
await self.db_pool.simple_insert(
"local_media_repository",
{
@@ -448,6 +465,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"media_length": media_length,
"user_id": user_id.to_string(),
"url_cache": url_cache,
"authenticated": authenticated,
},
desc="store_local_media",
)
@@ -638,6 +656,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"filesystem_id",
"last_access_ts",
"quarantined_by",
"authenticated",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -654,6 +673,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
filesystem_id=row[4],
last_access_ts=row[5],
quarantined_by=row[6],
authenticated=row[7],
)
async def store_cached_remote_media(
@@ -666,6 +686,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
upload_name: Optional[str],
filesystem_id: str,
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
else:
authenticated = False
await self.db_pool.simple_insert(
"remote_media_cache",
{
@@ -677,6 +702,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"upload_name": upload_name,
"filesystem_id": filesystem_id,
"last_access_ts": time_now_ms,
"authenticated": authenticated,
},
desc="store_cached_remote_media",
)


@@ -279,8 +279,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
@cached(max_entries=100000) # type: ignore[synapse-@cached-mutable]
async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]:
"""Get the details of a room roughly suitable for use by the room
"""
Get the details of a room roughly suitable for use by the room
summary extension to /sync. Useful when lazy loading room members.
Returns the total count of members in the room by membership type, and a
truncated list of members (the heroes). This will be the first 6 members of the
room:
- We want 5 heroes plus 1, in case one of them is the
calling user.
- They are ordered by `stream_ordering`, with joined or invited members
first. When no joined or invited members are available, this also includes
banned and left users.
Args:
room_id: The room ID to query
Returns:
@@ -308,23 +319,36 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
for count, membership in txn:
res.setdefault(membership, MemberSummary([], count))
# we order by membership and then fairly arbitrarily by event_id so
# heroes are consistent
# Note, rejected events will have a null membership field, so
# we manually filter them out.
# Order by membership (joins -> invites -> leave (former insiders) ->
# everything else (outsiders like bans/knocks)), then by `stream_ordering` so
# the first members in the room show up first and to make the sort stable
# (consistent heroes).
#
# Note: rejected events will have a null membership field, so we manually
# filter them out.
sql = """
SELECT state_key, membership, event_id
FROM current_state_events
WHERE type = 'm.room.member' AND room_id = ?
AND membership IS NOT NULL
ORDER BY
CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
event_id ASC
CASE membership WHEN ? THEN 1 WHEN ? THEN 2 WHEN ? THEN 3 ELSE 4 END ASC,
event_stream_ordering ASC
LIMIT ?
"""
# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
txn.execute(
sql,
(
room_id,
# Sort order
Membership.JOIN,
Membership.INVITE,
Membership.LEAVE,
# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
6,
),
)
for user_id, membership, event_id in txn:
summary = res[membership]
# we will always have a summary for this membership type at this
@@ -421,9 +445,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
if not membership_list:
return []
rooms = await self.db_pool.runInteraction(
"get_rooms_for_local_user_where_membership_is",
self._get_rooms_for_local_user_where_membership_is_txn,
# Convert membership list to frozen set as a) it needs to be hashable,
# and b) we don't care about the order.
membership_list = frozenset(membership_list)
rooms = await self._get_rooms_for_local_user_where_membership_is_inner(
user_id,
membership_list,
)
@@ -442,6 +468,24 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
return [room for room in rooms if room.room_id not in rooms_to_exclude]
@cached(max_entries=1000, tree=True)
async def _get_rooms_for_local_user_where_membership_is_inner(
self,
user_id: str,
membership_list: Collection[str],
) -> Sequence[RoomsForUser]:
if not membership_list:
return []
rooms = await self.db_pool.runInteraction(
"get_rooms_for_local_user_where_membership_is",
self._get_rooms_for_local_user_where_membership_is_txn,
user_id,
membership_list,
)
return rooms
def _get_rooms_for_local_user_where_membership_is_txn(
self,
txn: LoggingTransaction,
@@ -1509,10 +1553,19 @@ def extract_heroes_from_room_summary(
) -> List[str]:
"""Determine the users that represent a room, from the perspective of the `me` user.
This function expects `MemberSummary.members` to already be sorted by
`stream_ordering` like the results from `get_room_summary(...)`.
The rules which say which users we select are specified in the "Room Summary"
section of
https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
Args:
details: Mapping from membership type to member summary. We expect
`MemberSummary.members` to already be sorted by `stream_ordering`.
me: The user for whom we are determining the heroes.
Returns a list (possibly empty) of heroes' mxids.
"""
empty_ms = MemberSummary([], 0)
@@ -1527,11 +1580,11 @@ def extract_heroes_from_room_summary(
r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
# FIXME: order by stream ordering rather than as returned by SQL
# We expect `MemberSummary.members` to already be sorted by `stream_ordering`
if joined_user_ids or invited_user_ids:
return sorted(joined_user_ids + invited_user_ids)[0:5]
return (joined_user_ids + invited_user_ids)[0:5]
else:
return sorted(gone_user_ids)[0:5]
return gone_user_ids[0:5]
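
The reason for the wrapper/inner split above is that `@cached` keys entries on the call arguments, so each argument must be hashable; the wrapper normalises the list into a `frozenset` first. The same constraint can be shown with stdlib `functools.lru_cache` (an approximation only; the real decorator is Synapse's `@cached`):

```python
from functools import lru_cache
from typing import Collection, FrozenSet, Tuple


@lru_cache(maxsize=1000)
def _rooms_for_user_where_membership_is_inner(
    user_id: str, membership_list: FrozenSet[str]
) -> Tuple[str, ...]:
    # Pretend expensive DB lookup, keyed on (user_id, membership_list).
    return ()


def rooms_for_user_where_membership_is(
    user_id: str, membership_list: Collection[str]
) -> Tuple[str, ...]:
    # Lists are unhashable (and order-sensitive); a frozenset is hashable
    # and lets ["join", "invite"] and ["invite", "join"] share one entry.
    return _rooms_for_user_where_membership_is_inner(
        user_id, frozenset(membership_list)
    )


# Passing the raw list into the cached function instead would raise
# "TypeError: unhashable type: 'list'".
```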
@attr.s(slots=True, auto_attribs=True)


@@ -41,7 +41,7 @@ from typing import (
import attr
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
@@ -298,6 +298,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
create_event = await self.get_event(create_id)
return create_event
@cached(max_entries=10000)
async def get_room_type(self, room_id: str) -> Optional[str]:
"""Get the room type for a given room. The server must be joined to the
given room.
"""
row = await self.db_pool.simple_select_one(
table="room_stats_state",
keyvalues={"room_id": room_id},
retcols=("room_type",),
allow_none=True,
desc="get_room_type",
)
if row is not None:
return row[0]
# If we haven't updated `room_stats_state` with the room yet, query the
# create event directly.
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
return room_type
@cachedList(cached_method_name="get_room_type", list_name="room_ids")
async def bulk_get_room_type(
self, room_ids: Set[str]
) -> Mapping[str, Optional[str]]:
"""Bulk fetch room types for the given rooms, the server must be in all
the rooms given.
"""
rows = await self.db_pool.simple_select_many_batch(
table="room_stats_state",
column="room_id",
iterable=room_ids,
retcols=("room_id", "room_type"),
desc="bulk_get_room_type",
)
# If we haven't updated `room_stats_state` with the room yet, query the
# create events directly. This should happen only rarely so we don't
# mind if we do this in a loop.
results = dict(rows)
for room_id in room_ids - results.keys():
create_event = await self.get_create_event_for_room(room_id)
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
results[room_id] = room_type
return results
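
`bulk_get_room_type` follows a batched-lookup-with-fallback shape: answer as much as possible from one `room_stats_state` read and only pay the per-room create-event path for the leftovers. A condensed, non-async sketch with stand-in fetch callables:

```python
from typing import Callable, Dict, Mapping, Optional, Set


def bulk_get_room_type(
    room_ids: Set[str],
    fetch_from_stats: Callable[[Set[str]], Mapping[str, Optional[str]]],
    fetch_from_create_event: Callable[[str], Optional[str]],
) -> Dict[str, Optional[str]]:
    # One batched read of room_stats_state answers the common case...
    results: Dict[str, Optional[str]] = dict(fetch_from_stats(room_ids))
    # ...and only rooms not yet present in the stats table (rare, so a
    # per-room loop is acceptable) fall back to the create event.
    for room_id in room_ids - results.keys():
        results[room_id] = fetch_from_create_event(room_id)
    return results
```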
@cached(max_entries=100000, iterable=True)
async def get_partial_current_state_ids(self, room_id: str) -> StateMap[str]:
"""Get the current state event ids for a room based on the


@@ -78,10 +78,11 @@ from synapse.storage.database import (
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -1293,6 +1294,122 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
get_last_event_pos_in_room_before_stream_ordering_txn,
)
async def bulk_get_last_event_pos_in_room_before_stream_ordering(
self,
room_ids: StrCollection,
end_token: RoomStreamToken,
) -> Dict[str, int]:
"""Bulk fetch the stream position of the latest events in the given
rooms
"""
min_token = end_token.stream
max_token = end_token.get_max_stream_pos()
results: Dict[str, int] = {}
# First, we check for the rooms in the stream change cache to see if we
# can just use the latest position from it.
missing_room_ids: Set[str] = set()
for room_id in room_ids:
stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id)
if stream_pos and stream_pos <= min_token:
results[room_id] = stream_pos
else:
missing_room_ids.add(room_id)
# Next, we query the stream position from the DB. At first we fetch all
# positions less than the *max* stream pos in the token, then filter
# them down. We do this as a) this is a cheaper query, and b) the vast
# majority of rooms will have a latest token from before the min stream
# pos.
def bulk_get_last_event_pos_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
# This query fetches the latest stream position in the rooms before
# the given max position.
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, (
SELECT stream_ordering FROM events AS e
LEFT JOIN rejections USING (event_id)
WHERE e.room_id = r.room_id
AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
ORDER BY stream_ordering DESC
LIMIT 1
)
FROM rooms AS r
WHERE {clause}
"""
txn.execute(sql, [max_token] + args)
return {row[0]: row[1] for row in txn}
recheck_rooms: Set[str] = set()
for batched in batch_iter(missing_room_ids, 1000):
result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering",
bulk_get_last_event_pos_txn,
batched,
)
# Check that the stream positions for the rooms are from before the
# minimum position of the token. If not then we need to fetch more
# rows.
for room_id, stream in result.items():
if min_token < stream:
recheck_rooms.add(room_id)
else:
results[room_id] = stream
if not recheck_rooms:
return results
# For the remaining rooms we need to fetch all rows between the min and
# max stream positions in the end token, and filter out the rows that
# are after the end token.
#
# This query should be fast as the range between the min and max should
# be small.
def bulk_get_last_event_pos_recheck_txn(
txn: LoggingTransaction, batch_room_ids: StrCollection
) -> Dict[str, int]:
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", batch_room_ids
)
sql = f"""
SELECT room_id, instance_name, stream_ordering
FROM events
WHERE ? < stream_ordering AND stream_ordering <= ?
AND NOT outlier
AND rejection_reason IS NULL
AND {clause}
ORDER BY stream_ordering ASC
"""
txn.execute(sql, [min_token, max_token] + args)
results: Dict[str, int] = {}
for row in txn:
room_id = row[0]
event_pos = PersistedEventPosition(row[1], row[2])
if not event_pos.persisted_after(end_token):
results[room_id] = event_pos.stream
return results
for batched in batch_iter(recheck_rooms, 1000):
recheck_result = await self.db_pool.runInteraction(
"bulk_get_last_event_pos_in_room_before_stream_ordering_recheck",
bulk_get_last_event_pos_recheck_txn,
batched,
)
results.update(recheck_result)
return results
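
Stripped of batching and the multi-writer token details, the three-phase structure of `bulk_get_last_event_pos_in_room_before_stream_ordering` looks roughly like this; the callables stand in for the cache probe and the two SQL queries above:

```python
from typing import Callable, Dict, Iterable, Optional, Set


def bulk_last_positions(
    room_ids: Iterable[str],
    min_token: int,
    cache_last_change: Callable[[str], Optional[int]],
    fetch_before_max: Callable[[Set[str]], Dict[str, int]],
    fetch_in_window: Callable[[Set[str]], Dict[str, int]],
) -> Dict[str, int]:
    results: Dict[str, int] = {}

    # Phase 1: the stream change cache settles rooms whose last change is
    # at or before the token's minimum stream position.
    missing: Set[str] = set()
    for room_id in room_ids:
        pos = cache_last_change(room_id)
        if pos is not None and pos <= min_token:
            results[room_id] = pos
        else:
            missing.add(room_id)

    # Phase 2: one cheap query bounded only by the *max* position. Most
    # rooms' latest event is before the min position, so this usually
    # resolves nearly everything.
    recheck: Set[str] = set()
    for room_id, pos in fetch_before_max(missing).items():
        if pos > min_token:
            # Inside the (min, max] window: needs per-event filtering
            # against the token before it can be trusted.
            recheck.add(room_id)
        else:
            results[room_id] = pos

    # Phase 3: scan only the small (min, max] window for the stragglers,
    # dropping rows that land after the end token.
    results.update(fetch_in_window(recheck))
    return results
```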
async def get_current_room_stream_token_for_room_id(
self, room_id: str
) -> RoomStreamToken:


@@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 85 # remember to update the list below when updating
SCHEMA_VERSION = 86 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -139,6 +139,9 @@ Changes in SCHEMA_VERSION = 84
Changes in SCHEMA_VERSION = 85
- Add a column `suspended` to the `users` table
Changes in SCHEMA_VERSION = 86
- Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache`
"""


@@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
ALTER TABLE remote_media_cache ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;
ALTER TABLE local_media_repository ADD COLUMN authenticated BOOLEAN DEFAULT FALSE NOT NULL;


@@ -20,6 +20,7 @@
#
#
import abc
import logging
import re
import string
from enum import Enum
@@ -74,6 +75,9 @@ if TYPE_CHECKING:
from synapse.storage.databases.main import DataStore, PurgeEventsStore
from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore
logger = logging.getLogger(__name__)
# Define a state map type from type/state_key to T (usually an event ID or
# event)
T = TypeVar("T")
@@ -454,6 +458,8 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
represented by a default `stream` attribute and a map of instance name to
stream position of any writers that are ahead of the default stream
position.
The values in `instance_map` must be greater than the `stream` attribute.
"""
stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True)
@@ -468,6 +474,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
kw_only=True,
)
def __attrs_post_init__(self) -> None:
# Enforce that all instances have a value greater than the min stream
# position.
for i, v in self.instance_map.items():
if v <= self.stream:
raise ValueError(
f"'instance_map' includes a stream position before the main 'stream' attribute. Instance: {i}"
)
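
The new `__attrs_post_init__` enforces the invariant the docstring now states: entries in `instance_map` must sit strictly ahead of the base `stream` position. A stripped-down illustration of the same shape (a toy class, not the real `AbstractMultiWriterStreamToken`):

```python
from typing import Mapping

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToyToken:
    stream: int
    instance_map: Mapping[str, int] = attr.Factory(dict)

    def __attrs_post_init__(self) -> None:
        # Entries at or below `stream` carry no extra information, so
        # treat them as a bug instead of silently keeping them around.
        for instance, pos in self.instance_map.items():
            if pos <= self.stream:
                raise ValueError(
                    f"instance_map position before 'stream': {instance}"
                )


ToyToken(stream=5, instance_map={"writer1": 6})   # fine
# ToyToken(stream=5, instance_map={"writer1": 5})  # raises ValueError
```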
@classmethod
@abc.abstractmethod
async def parse(cls, store: "DataStore", string: str) -> "Self":
@@ -494,6 +509,9 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
for instance in set(self.instance_map).union(other.instance_map)
}
# Filter out any redundant entries.
instance_map = {i: s for i, s in instance_map.items() if s > max_stream}
return attr.evolve(
self, stream=max_stream, instance_map=immutabledict(instance_map)
)
@@ -539,10 +557,15 @@ class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
def bound_stream_token(self, max_stream: int) -> "Self":
"""Bound the stream positions to a maximum value"""
min_pos = min(self.stream, max_stream)
return type(self)(
stream=min(self.stream, max_stream),
stream=min_pos,
instance_map=immutabledict(
{k: min(s, max_stream) for k, s in self.instance_map.items()}
{
k: min(s, max_stream)
for k, s in self.instance_map.items()
if min(s, max_stream) > min_pos
}
),
)
@@ -637,6 +660,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
"Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'."
)
super().__attrs_post_init__()
@classmethod
async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken":
try:
@@ -651,6 +676,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
instance_map = {}
for part in parts[1:]:
if not part:
# Handle tokens of the form `m5~`, which were created by
# a bug
continue
key, value = part.split(".")
instance_id = int(key)
pos = int(value)
@@ -666,7 +696,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
except CancelledError:
raise
except Exception:
pass
# We log an exception here as even though this *might* be a client
# handing us a bad token, it's more likely that Synapse returned a bad
# token (and we really want to catch those!).
logger.exception("Failed to parse stream token: %r", string)
raise SynapseError(400, "Invalid room stream token %r" % (string,))
@classmethod
@@ -713,6 +746,8 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
return self.instance_map.get(instance_name, self.stream)
async def to_string(self, store: "DataStore") -> str:
"""See class level docstring for information about the format."""
if self.topological is not None:
return "t%d-%d" % (self.topological, self.stream)
elif self.instance_map:
@@ -727,8 +762,10 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
instance_id = await store.get_id_for_instance(name)
entries.append(f"{instance_id}.{pos}")
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
if entries:
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
return f"s{self.stream}"
else:
return "s%d" % (self.stream,)
@@ -756,6 +793,11 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
instance_map = {}
for part in parts[1:]:
if not part:
# Handle tokens of the form `m5~`, which were created by
# a bug
continue
key, value = part.split(".")
instance_id = int(key)
pos = int(value)
@@ -770,10 +812,15 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
except CancelledError:
raise
except Exception:
pass
# We log an exception here as even though this *might* be a client
# handing us a bad token, it's more likely that Synapse returned a bad
# token (and we really want to catch those!).
logger.exception("Failed to parse stream token: %r", string)
raise SynapseError(400, "Invalid stream token %r" % (string,))
async def to_string(self, store: "DataStore") -> str:
"""See class level docstring for information about the format."""
if self.instance_map:
entries = []
for name, pos in self.instance_map.items():
@@ -786,8 +833,10 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
instance_id = await store.get_id_for_instance(name)
entries.append(f"{instance_id}.{pos}")
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
if entries:
encoded_map = "~".join(entries)
return f"m{self.stream}~{encoded_map}"
return str(self.stream)
else:
return str(self.stream)
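
The `if entries:` guards close the loop on the `m5~` parsing workaround above: a token whose map contributes no entries now serializes back to the plain stream form instead of a dangling `m{stream}~`. A simplified rendering of the `RoomStreamToken` flavour of the format (the real method also resolves instance names to IDs via the `DataStore`, and `MultiWriterStreamToken` drops the `s` prefix):

```python
from typing import Mapping


def room_token_to_string(stream: int, instance_map: Mapping[int, int]) -> str:
    # Only positions ahead of `stream` are worth encoding; if none
    # remain, fall back to the plain form rather than emitting "m5~".
    entries = [f"{iid}.{pos}" for iid, pos in instance_map.items() if pos > stream]
    if entries:
        return f"m{stream}~" + "~".join(entries)
    return f"s{stream}"


assert room_token_to_string(5, {}) == "s5"
assert room_token_to_string(5, {1: 6}) == "m5~1.6"
assert room_token_to_string(5, {1: 5}) == "s5"  # previously could yield "m5~"
```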


@@ -18,7 +18,7 @@
#
#
from enum import Enum
from typing import TYPE_CHECKING, Dict, Final, List, Optional, Tuple
from typing import TYPE_CHECKING, Dict, Final, List, Optional, Sequence, Tuple
import attr
from typing_extensions import TypedDict
@@ -200,18 +200,24 @@ class SlidingSyncResult:
flag set. (same as sync v2)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedHero:
user_id: str
display_name: Optional[str]
avatar_url: Optional[str]
name: Optional[str]
avatar: Optional[str]
heroes: Optional[List[EventBase]]
heroes: Optional[List[StrippedHero]]
is_dm: bool
initial: bool
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
required_state: Optional[List[EventBase]]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
timeline_events: Optional[List[EventBase]]
# Should be empty for invite/knock rooms with `stripped_state`
required_state: List[EventBase]
# Should be empty for invite/knock rooms with `stripped_state`
timeline_events: List[EventBase]
bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
# Optional because it's only relevant to invite/knock rooms
stripped_state: Optional[List[JsonDict]]
stripped_state: List[JsonDict]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
prev_batch: Optional[StreamToken]
# Only optional because it won't be included for invite/knock rooms with `stripped_state`
@@ -252,10 +258,39 @@ class SlidingSyncResult:
count: int
ops: List[Operation]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Extensions:
"""Responses for extensions
Attributes:
to_device: The to-device extension (MSC3885)
"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToDeviceExtension:
"""The to-device extension (MSC3885)
Attributes:
next_batch: The to-device stream token the client should use
to get more results
events: A list of to-device messages for the client
"""
next_batch: str
events: Sequence[JsonMapping]
def __bool__(self) -> bool:
return bool(self.events)
to_device: Optional[ToDeviceExtension] = None
def __bool__(self) -> bool:
return bool(self.to_device)
next_pos: StreamToken
lists: Dict[str, SlidingWindowList]
rooms: Dict[str, RoomResult]
extensions: JsonMapping
extensions: Extensions
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@@ -271,5 +306,5 @@ class SlidingSyncResult:
next_pos=next_pos,
lists={},
rooms={},
extensions={},
extensions=SlidingSyncResult.Extensions(),
)
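
The nested `__bool__` overrides are what keep an entirely empty sliding sync response falsy, so callers can decide to keep long-polling with a single truthiness check. A cut-down illustration:

```python
from typing import List, Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ToDevice:
    next_batch: str
    events: List[dict]

    def __bool__(self) -> bool:
        # A to-device section with no events counts as "nothing new".
        return bool(self.events)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Extensions:
    to_device: Optional[ToDevice] = None

    def __bool__(self) -> bool:
        # Non-empty only if some extension actually carries data.
        return bool(self.to_device)


assert not Extensions()
assert not Extensions(to_device=ToDevice(next_batch="5", events=[]))
assert Extensions(to_device=ToDevice(next_batch="5", events=[{"type": "m.test"}]))
```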


@@ -200,9 +200,6 @@ class SlidingSyncBody(RequestBodyModel):
}
timeline_limit: The maximum number of timeline events to return per response.
include_heroes: Return a stripped variant of membership events (containing
`user_id` and optionally `avatar_url` and `displayname`) for the users used
to calculate the room name.
filters: Filters to apply to the list before sorting.
"""
@@ -270,16 +267,53 @@ class SlidingSyncBody(RequestBodyModel):
else:
ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type]
slow_get_all_rooms: Optional[StrictBool] = False
include_heroes: Optional[StrictBool] = False
filters: Optional[Filters] = None
class RoomSubscription(CommonRoomParameters):
pass
class Extension(RequestBodyModel):
enabled: Optional[StrictBool] = False
lists: Optional[List[StrictStr]] = None
rooms: Optional[List[StrictStr]] = None
class Extensions(RequestBodyModel):
"""The extensions section of the request.
Extensions MUST have an `enabled` flag which defaults to `false`. If a client
sends an unknown extension name, the server MUST ignore it (or else backwards
compatibility between clients and servers is broken when a newer client tries to
communicate with an older server).
"""
class ToDeviceExtension(RequestBodyModel):
"""The to-device extension (MSC3885)
Attributes:
enabled
limit: Maximum number of to-device messages to return
since: The `next_batch` from the previous sync response
"""
enabled: Optional[StrictBool] = False
limit: StrictInt = 100
since: Optional[StrictStr] = None
@validator("since")
def since_token_check(
cls, value: Optional[StrictStr]
) -> Optional[StrictStr]:
# `since` comes in as an opaque string token but we know that it's just
# an integer representing the position in the device inbox stream. We
# want to pre-validate it to make sure it works fine in downstream code.
if value is None:
return value
try:
int(value)
except ValueError:
raise ValueError(
"'extensions.to_device.since' is invalid (should look like an int)"
)
return value
to_device: Optional[ToDeviceExtension] = None
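
In effect the validator only guarantees that the opaque-looking `since` token parses as an integer device-inbox position before handlers rely on it; for example (hypothetical request bodies):

```python
# Accepted: the token parses as an int, even though clients should treat
# it as opaque.
{"extensions": {"to_device": {"enabled": True, "since": "1234"}}}

# Rejected at validation time with:
#   "'extensions.to_device.since' is invalid (should look like an int)"
{"extensions": {"to_device": {"enabled": True, "since": "abc~def"}}}
```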
# mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
if TYPE_CHECKING:
@@ -287,7 +321,7 @@ class SlidingSyncBody(RequestBodyModel):
else:
lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type]
room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None
extensions: Optional[Dict[StrictStr, Extension]] = None
extensions: Optional[Extensions] = None
@validator("lists")
def lists_length_check(


@@ -327,7 +327,7 @@ class StreamChangeCache:
for entity in r:
self._entity_to_key.pop(entity, None)
def get_max_pos_of_last_change(self, entity: EntityType) -> int:
def get_max_pos_of_last_change(self, entity: EntityType) -> Optional[int]:
"""Returns an upper bound of the stream id of the last change to an
entity.
@@ -335,7 +335,11 @@ class StreamChangeCache:
entity: The entity to check.
Return:
The stream position of the latest change for the given entity or
the earliest known stream position if the entity is unknown.
The stream position of the latest change for the given entity, if
known
"""
return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
return self._entity_to_key.get(entity)
def get_earliest_known_position(self) -> int:
"""Returns the earliest position in the cache."""
return self._earliest_known_stream_pos
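
Since `get_max_pos_of_last_change` now returns `None` for unknown entities, callers choose their own fallback, as the `EventFederationWorkerStore` hunk earlier does by clamping to the cache's horizon. A self-contained sketch of that calling pattern (`StreamChangeCacheLike` is a stand-in):

```python
from typing import Dict, Optional


class StreamChangeCacheLike:
    """Stand-in exposing just the two methods used below."""

    def __init__(self, earliest: int, last_changes: Dict[str, int]) -> None:
        self._earliest = earliest
        self._last_changes = last_changes

    def get_max_pos_of_last_change(self, entity: str) -> Optional[int]:
        return self._last_changes.get(entity)

    def get_earliest_known_position(self) -> int:
        return self._earliest


def clamped_last_change(cache: StreamChangeCacheLike, entity: str) -> int:
    pos = cache.get_max_pos_of_last_change(entity)
    if pos is None:
        # Unknown entity: its last change happened somewhere at or before
        # the cache's horizon, so the horizon is a safe upper bound.
        pos = cache.get_earliest_known_position()
    return pos
```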

File diff suppressed because it is too large.


@@ -211,6 +211,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Blow away caches (supported room versions can only change due to a restart).
self.store.get_rooms_for_user.invalidate_all()
self.store._get_rooms_for_local_user_where_membership_is_inner.invalidate_all()
self.store._get_event_cache.clear()
self.store._event_ref.clear()


@@ -1057,13 +1057,15 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
)
assert channel.code == 200
@override_config({"remote_media_download_burst_count": "87M"})
@patch(
"synapse.http.matrixfederationclient.read_body_with_max_size",
read_body_with_max_size_30MiB,
)
def test_download_ratelimit_max_size_sub(self) -> None:
def test_download_ratelimit_unknown_length(self) -> None:
"""
Test that if no content-length is provided, the default max size is applied instead
Test that if no content-length is provided, the ratelimit will still be applied
after the download, once the length is known
"""
# mock out actually sending the request
@@ -1077,19 +1079,48 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
self.client._send_request = _send_request # type: ignore
# ten requests should go through using the max size (500MB/50MB)
for i in range(10):
channel2 = self.make_request(
# 3 requests should go through (note the 3rd one would technically violate the
# ratelimit, but since the limit is applied *after* download it succeeds - the
# next one will be ratelimited)
for i in range(3):
channel = self.make_request(
"GET",
f"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxy{i}",
shorthand=False,
)
assert channel2.code == 200
assert channel.code == 200
# eleventh will hit ratelimit
channel3 = self.make_request(
# 4th will hit ratelimit
channel2 = self.make_request(
"GET",
"/_matrix/media/v3/download/remote.org/abcdefghijklmnopqrstuvwxyx",
shorthand=False,
)
assert channel3.code == 429
assert channel2.code == 429
@override_config({"max_upload_size": "29M"})
@patch(
"synapse.http.matrixfederationclient.read_body_with_max_size",
read_body_with_max_size_30MiB,
)
def test_max_download_respected(self) -> None:
"""
Test that the max download size is enforced - note that max download size is determined
by the max_upload_size
"""
# mock out actually sending the request
async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
resp = MagicMock(spec=IResponse)
resp.code = 200
resp.length = 31457280
resp.headers = Headers({"Content-Type": ["application/octet-stream"]})
resp.phrase = b"OK"
return resp
self.client._send_request = _send_request # type: ignore
channel = self.make_request(
"GET", "/_matrix/media/v3/download/remote.org/abcd", shorthand=False
)
assert channel.code == 502
assert channel.json_body["errcode"] == "M_TOO_LARGE"


@@ -43,6 +43,7 @@ from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
from twisted.web.http_headers import Headers
from twisted.web.iweb import UNKNOWN_LENGTH, IResponse
from twisted.web.resource import Resource
from synapse.api.errors import HttpResponseException
from synapse.api.ratelimiting import Ratelimiter
@@ -1809,13 +1810,19 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
)
assert channel.code == 200
@override_config(
{
"remote_media_download_burst_count": "87M",
}
)
@patch(
"synapse.http.matrixfederationclient.read_multipart_response",
read_multipart_response_30MiB,
)
def test_download_ratelimit_max_size_sub(self) -> None:
def test_download_ratelimit_unknown_length(self) -> None:
"""
Test that if no content-length is provided, the default max size is applied instead
Test that if no content-length is provided, ratelimiting is still applied after
the media is downloaded and its length is known
"""
# mock out actually sending the request
@@ -1831,8 +1838,9 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
self.client._send_request = _send_request # type: ignore
# ten requests should go through using the max size (500MB/50MB)
for i in range(10):
# first 3 will go through (note that the 3rd request technically violates the rate
# limit, but since the ratelimiting is applied *after* download it goes through;
# the next one fails)
for i in range(3):
channel2 = self.make_request(
"GET",
f"/_matrix/client/v1/media/download/remote.org/abc{i}",
@@ -1841,7 +1849,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
)
assert channel2.code == 200
# eleventh will hit ratelimit
# 4th will hit ratelimit
channel3 = self.make_request(
"GET",
"/_matrix/client/v1/media/download/remote.org/abcd",
@@ -1850,6 +1858,39 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase):
)
assert channel3.code == 429
@override_config({"max_upload_size": "29M"})
@patch(
"synapse.http.matrixfederationclient.read_multipart_response",
read_multipart_response_30MiB,
)
def test_max_download_respected(self) -> None:
"""
Test that the max download size is enforced - note that max download size is determined
by the max_upload_size
"""
# mock out actually sending the request, returns a 30MiB response
async def _send_request(*args: Any, **kwargs: Any) -> IResponse:
resp = MagicMock(spec=IResponse)
resp.code = 200
resp.length = 31457280
resp.headers = Headers(
{"Content-Type": ["multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p"]}
)
resp.phrase = b"OK"
return resp
self.client._send_request = _send_request # type: ignore
channel = self.make_request(
"GET",
"/_matrix/client/v1/media/download/remote.org/abcd",
shorthand=False,
access_token=self.tok,
)
assert channel.code == 502
assert channel.json_body["errcode"] == "M_TOO_LARGE"
def test_file_download(self) -> None:
content = io.BytesIO(b"file_to_stream")
content_uri = self.get_success(
@@ -2426,3 +2467,211 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase):
server_name=None,
)
)
configs = [
{"extra_config": {"dynamic_thumbnails": True}},
{"extra_config": {"dynamic_thumbnails": False}},
]
@parameterized_class(configs)
class AuthenticatedMediaTestCase(unittest.HomeserverTestCase):
extra_config: Dict[str, Any]
servlets = [
media.register_servlets,
login.register_servlets,
admin.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
self.clock = clock
self.storage_path = self.mktemp()
self.media_store_path = self.mktemp()
os.mkdir(self.storage_path)
os.mkdir(self.media_store_path)
config["media_store_path"] = self.media_store_path
config["enable_authenticated_media"] = True
provider_config = {
"module": "synapse.media.storage_provider.FileStorageProviderBackend",
"store_local": True,
"store_synchronous": False,
"store_remote": True,
"config": {"directory": self.storage_path},
}
config["media_storage_providers"] = [provider_config]
config.update(self.extra_config)
return self.setup_test_homeserver(config=config)
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.repo = hs.get_media_repository()
self.client = hs.get_federation_http_client()
self.store = hs.get_datastores().main
self.user = self.register_user("user", "pass")
self.tok = self.login("user", "pass")
def create_resource_dict(self) -> Dict[str, Resource]:
resources = super().create_resource_dict()
resources["/_matrix/media"] = self.hs.get_media_repository_resource()
return resources
def test_authenticated_media(self) -> None:
# upload some local media with authentication on
channel = self.make_request(
"POST",
"_matrix/media/v3/upload?filename=test_png_upload",
SMALL_PNG,
self.tok,
shorthand=False,
content_type=b"image/png",
custom_headers=[("Content-Length", str(67))],
)
self.assertEqual(channel.code, 200)
res = channel.json_body.get("content_uri")
assert res is not None
uri = res.split("mxc://")[1]
# request media over authenticated endpoint, should be found
channel2 = self.make_request(
"GET",
f"_matrix/client/v1/media/download/{uri}",
access_token=self.tok,
shorthand=False,
)
self.assertEqual(channel2.code, 200)
# request same media over unauthenticated media, should raise 404 not found
channel3 = self.make_request(
"GET", f"_matrix/media/v3/download/{uri}", shorthand=False
)
self.assertEqual(channel3.code, 404)
# check thumbnails as well
params = "?width=32&height=32&method=crop"
channel4 = self.make_request(
"GET",
f"/_matrix/client/v1/media/thumbnail/{uri}{params}",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel4.code, 200)
params = "?width=32&height=32&method=crop"
channel5 = self.make_request(
"GET",
f"/_matrix/media/r0/thumbnail/{uri}{params}",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel5.code, 404)
# Inject a piece of remote media.
file_id = "abcdefg12345"
file_info = FileInfo(server_name="lonelyIsland", file_id=file_id)
media_storage = self.hs.get_media_repository().media_storage
ctx = media_storage.store_into_file(file_info)
(f, fname) = self.get_success(ctx.__aenter__())
f.write(SMALL_PNG)
self.get_success(ctx.__aexit__(None, None, None))
# we write the authenticated status when storing media, so this should pick up
# config and authenticate the media
self.get_success(
self.store.store_cached_remote_media(
origin="lonelyIsland",
media_id="52",
media_type="image/png",
media_length=1,
time_now_ms=self.clock.time_msec(),
upload_name="remote_test.png",
filesystem_id=file_id,
)
)
# ensure we have thumbnails for the non-dynamic code path
if self.extra_config == {"dynamic_thumbnails": False}:
self.get_success(
self.repo._generate_thumbnails(
"lonelyIsland", "52", file_id, "image/png"
)
)
channel6 = self.make_request(
"GET",
"_matrix/client/v1/media/download/lonelyIsland/52",
access_token=self.tok,
shorthand=False,
)
self.assertEqual(channel6.code, 200)
channel7 = self.make_request(
"GET", f"_matrix/media/v3/download/{uri}", shorthand=False
)
self.assertEqual(channel7.code, 404)
params = "?width=32&height=32&method=crop"
channel8 = self.make_request(
"GET",
f"/_matrix/client/v1/media/thumbnail/lonelyIsland/52{params}",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel8.code, 200)
channel9 = self.make_request(
"GET",
f"/_matrix/media/r0/thumbnail/lonelyIsland/52{params}",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel9.code, 404)
# Inject a piece of local media that isn't authenticated
file_id = "abcdefg123456"
file_info = FileInfo(None, file_id=file_id)
ctx = media_storage.store_into_file(file_info)
(f, fname) = self.get_success(ctx.__aenter__())
f.write(SMALL_PNG)
self.get_success(ctx.__aexit__(None, None, None))
self.get_success(
self.store.db_pool.simple_insert(
"local_media_repository",
{
"media_id": "abcdefg123456",
"media_type": "image/png",
"created_ts": self.clock.time_msec(),
"upload_name": "test_local",
"media_length": 1,
"user_id": "someone",
"url_cache": None,
"authenticated": False,
},
desc="store_local_media",
)
)
# check that unauthenticated media is still available over both endpoints
channel9 = self.make_request(
"GET",
"/_matrix/client/v1/media/download/test/abcdefg123456",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel9.code, 200)
channel10 = self.make_request(
"GET",
"/_matrix/media/r0/download/test/abcdefg123456",
shorthand=False,
access_token=self.tok,
)
self.assertEqual(channel10.code, 200)

File diff suppressed because it is too large.


@@ -289,10 +289,6 @@ class FakeChannel:
self._reactor.run()
while not self.is_finished():
# If there's a producer, tell it to resume producing so we get content
if self._producer:
self._producer.resumeProducing()
if self._reactor.seconds() > end_time:
raise TimedOutException("Timed out waiting for request to finish.")


@@ -19,20 +19,28 @@
# [This file includes modifications made by New Vector Limited]
#
#
import logging
from typing import List, Optional, Tuple, cast
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import Membership
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest import admin
from synapse.rest.admin import register_servlets_for_client_rest_resource
from synapse.rest.client import login, room
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import UserID, create_requester
from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
from tests.test_utils import event_injection
from tests.unittest import skip_unless
logger = logging.getLogger(__name__)
class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
@@ -240,6 +248,397 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
)
class RoomSummaryTestCase(unittest.HomeserverTestCase):
"""
Test `/sync` room summary related logic like `get_room_summary(...)` and
`extract_heroes_from_room_summary(...)`
"""
servlets = [
admin.register_servlets,
knock.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
self.store = self.hs.get_datastores().main
def _assert_member_summary(
self,
actual_member_summary: MemberSummary,
expected_member_list: List[str],
*,
expected_member_count: Optional[int] = None,
) -> None:
"""
Assert that the `MemberSummary` object has the expected members.
"""
self.assertListEqual(
[
user_id
for user_id, _membership_event_id in actual_member_summary.members
],
expected_member_list,
)
self.assertEqual(
actual_member_summary.count,
(
expected_member_count
if expected_member_count is not None
else len(expected_member_list)
),
)
def test_get_room_summary_membership(self) -> None:
"""
Test that `get_room_summary(...)` gets every kind of membership when there
aren't that many members in the room.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
_user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
user5_tok = self.login(user5_id, "pass")
# Setup a room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# User2 is banned
self.helper.join(room_id, user2_id, tok=user2_tok)
self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
# User3 is invited by user1
self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
# User4 leaves
self.helper.join(room_id, user4_id, tok=user4_tok)
self.helper.leave(room_id, user4_id, tok=user4_tok)
# User5 joins
self.helper.join(room_id, user5_id, tok=user5_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
empty_ms = MemberSummary([], 0)
self._assert_member_summary(
room_membership_summary.get(Membership.JOIN, empty_ms),
[user1_id, user5_id],
)
self._assert_member_summary(
room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
)
self._assert_member_summary(
room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
)
self._assert_member_summary(
room_membership_summary.get(Membership.BAN, empty_ms), [user2_id]
)
self._assert_member_summary(
room_membership_summary.get(Membership.KNOCK, empty_ms),
[
# No one knocked
],
)
def test_get_room_summary_membership_order(self) -> None:
"""
Test that `get_room_summary(...)` stacks our limit of 6 in this order: joins ->
invites -> leaves -> everything else (bans/knocks).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
_user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
user5_tok = self.login(user5_id, "pass")
user6_id = self.register_user("user6", "pass")
user6_tok = self.login(user6_id, "pass")
user7_id = self.register_user("user7", "pass")
user7_tok = self.login(user7_id, "pass")
# Set up the room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# We expect the order to be joins -> invites -> leaves -> bans, so set up the
# users *NOT* in that order to make sure we're actually sorting them.
# User2 is banned
self.helper.join(room_id, user2_id, tok=user2_tok)
self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
# User3 is invited by user1
self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
# User4 leaves
self.helper.join(room_id, user4_id, tok=user4_tok)
self.helper.leave(room_id, user4_id, tok=user4_tok)
# User5, User6, and User7 join
self.helper.join(room_id, user5_id, tok=user5_tok)
self.helper.join(room_id, user6_id, tok=user6_tok)
self.helper.join(room_id, user7_id, tok=user7_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
empty_ms = MemberSummary([], 0)
self._assert_member_summary(
room_membership_summary.get(Membership.JOIN, empty_ms),
[user1_id, user5_id, user6_id, user7_id],
)
self._assert_member_summary(
room_membership_summary.get(Membership.INVITE, empty_ms), [user3_id]
)
self._assert_member_summary(
room_membership_summary.get(Membership.LEAVE, empty_ms), [user4_id]
)
self._assert_member_summary(
room_membership_summary.get(Membership.BAN, empty_ms),
[
# The banned user is not in the summary because the summary can only fit
# 6 members and prefers everything else before bans
#
# user2_id
],
# But we still see the count of banned users
expected_member_count=1,
)
self._assert_member_summary(
room_membership_summary.get(Membership.KNOCK, empty_ms),
[
# No one knocked
],
)
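A hedged sketch of the stacking rule this test pins down (written from the assertions, not Synapse's actual query): fill the six slots joins-first, but keep the true per-membership counts, which is why the banned user drops out of the member list yet still registers as `expected_member_count=1`.

```python
from typing import Dict, List, Tuple

def stack_summary(
    members_by_membership: Dict[str, List[str]], limit: int = 6
) -> Dict[str, Tuple[List[str], int]]:
    """Stack up to `limit` members, joins first, keeping the full counts."""
    summary: Dict[str, Tuple[List[str], int]] = {}
    remaining = limit
    for membership in ("join", "invite", "leave", "ban", "knock"):
        members = members_by_membership.get(membership, [])
        shown = members[:remaining]  # truncated once the slots run out
        summary[membership] = (shown, len(members))
        remaining -= len(shown)
    return summary
```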
def test_extract_heroes_from_room_summary_excludes_self(self) -> None:
"""
Test that `extract_heroes_from_room_summary(...)` does not include the user
itself.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Set up the room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# User2 joins
self.helper.join(room_id, user2_id, tok=user2_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
# We first ask from the perspective of a random fake user
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me="@fakeuser"
)
# Make sure user1 is in the room (ensure our test setup is correct)
self.assertListEqual(hero_user_ids, [user1_id, user2_id])
# Now, we ask for the room summary from the perspective of user1
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me=user1_id
)
# User1 should not be included in the list of heroes because they are the one
# asking
self.assertListEqual(hero_user_ids, [user2_id])
def test_extract_heroes_from_room_summary_first_five_joins(self) -> None:
"""
Test that `extract_heroes_from_room_summary(...)` returns the first 5 joins.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
user5_tok = self.login(user5_id, "pass")
user6_id = self.register_user("user6", "pass")
user6_tok = self.login(user6_id, "pass")
user7_id = self.register_user("user7", "pass")
user7_tok = self.login(user7_id, "pass")
# Set up the room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# User2 through User7 join
self.helper.join(room_id, user2_id, tok=user2_tok)
self.helper.join(room_id, user3_id, tok=user3_tok)
self.helper.join(room_id, user4_id, tok=user4_tok)
self.helper.join(room_id, user5_id, tok=user5_tok)
self.helper.join(room_id, user6_id, tok=user6_tok)
self.helper.join(room_id, user7_id, tok=user7_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me="@fakuser"
)
# First 5 users to join the room
self.assertListEqual(
hero_user_ids, [user1_id, user2_id, user3_id, user4_id, user5_id]
)
def test_extract_heroes_from_room_summary_membership_order(self) -> None:
"""
Test that `extract_heroes_from_room_summary(...)` prefers joins/invites over
everything else.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
_user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
user5_id = self.register_user("user5", "pass")
user5_tok = self.login(user5_id, "pass")
# Set up the room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# We expect the order to be joins -> invites -> leaves -> bans, so set up the
# users *NOT* in that order to make sure we're actually sorting them.
# User2 is banned
self.helper.join(room_id, user2_id, tok=user2_tok)
self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
# User3 is invited by user1
self.helper.invite(room_id, targ=user3_id, tok=user1_tok)
# User4 leaves
self.helper.join(room_id, user4_id, tok=user4_tok)
self.helper.leave(room_id, user4_id, tok=user4_tok)
# User5 joins
self.helper.join(room_id, user5_id, tok=user5_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me="@fakeuser"
)
# Prefer joins -> invites, over everything else
self.assertListEqual(
hero_user_ids,
[
# The joins
user1_id,
user5_id,
# The invites
user3_id,
],
)
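Taken together, the hero tests in this file pin down roughly the following selection rule. This is a hedged sketch written from the assertions, not the body of the real `extract_heroes_from_room_summary`:

```python
from typing import Dict, List, Tuple

def pick_heroes(
    summary: Dict[str, Tuple[List[str], int]], me: str, limit: int = 5
) -> List[str]:
    # Prefer joins and invites, excluding the requesting user.
    heroes = [
        user_id
        for membership in ("join", "invite")
        for user_id in summary.get(membership, ([], 0))[0]
        if user_id != me
    ]
    if not heroes:
        # Fall back to leaves/bans only when there are no joins or invites
        # at all (see the skipped fallback test below).
        heroes = [
            user_id
            for membership in ("leave", "ban")
            for user_id in summary.get(membership, ([], 0))[0]
            if user_id != me
        ]
    return heroes[:limit]  # at most 5 heroes
```

Knocks never appear in either group, which is what the final test in this class checks.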
@skip_unless(
False,
"Test is not possible because when everyone leaves the room, "
+ "the server is `no_longer_in_room` and we don't have any `current_state_events` to query",
)
def test_extract_heroes_from_room_summary_fallback_leave_ban(self) -> None:
"""
Test that `extract_heroes_from_room_summary(...)` falls back to leave/ban if
there aren't any joins/invites.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
# Set up the room (user1 is the creator and is joined to the room)
room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
# User2 is banned
self.helper.join(room_id, user2_id, tok=user2_tok)
self.helper.ban(room_id, src=user1_id, targ=user2_id, tok=user1_tok)
# User3 leaves
self.helper.join(room_id, user3_id, tok=user3_tok)
self.helper.leave(room_id, user3_id, tok=user3_tok)
# User1 leaves (we're doing this last because they're the room creator)
self.helper.leave(room_id, user1_id, tok=user1_tok)
room_membership_summary = self.get_success(self.store.get_room_summary(room_id))
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me="@fakeuser"
)
# Fallback to people who left -> banned
self.assertListEqual(
hero_user_ids,
[user3_id, user1_id, user2_id],
)
def test_extract_heroes_from_room_summary_excludes_knocks(self) -> None:
"""
People who knock on the room have (potentially) never been in the room before
and are total outsiders. Plus the spec doesn't mention them at all for heroes.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Set up the knock room (user1 is the creator and is joined to the room)
knock_room_id = self.helper.create_room_as(
user1_id, tok=user1_tok, room_version=RoomVersions.V7.identifier
)
self.helper.send_state(
knock_room_id,
EventTypes.JoinRules,
{"join_rule": JoinRules.KNOCK},
tok=user1_tok,
)
# User2 knocks on the room
knock_channel = self.make_request(
"POST",
"/_matrix/client/r0/knock/%s" % (knock_room_id,),
b"{}",
user2_tok,
)
self.assertEqual(knock_channel.code, 200, knock_channel.result)
room_membership_summary = self.get_success(
self.store.get_room_summary(knock_room_id)
)
hero_user_ids = extract_heroes_from_room_summary(
room_membership_summary, me="@fakeuser"
)
# user1 is the creator and is joined to the room (should show up as a hero)
# user2 is knocking on the room (should not show up as a hero)
self.assertListEqual(
hero_user_ids,
[user1_id],
)
class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main

View File

@@ -19,9 +19,18 @@
#
#
from typing import Type
from unittest import skipUnless
from immutabledict import immutabledict
from parameterized import parameterized_class
from synapse.api.errors import SynapseError
from synapse.types import (
AbstractMultiWriterStreamToken,
MultiWriterStreamToken,
RoomAlias,
RoomStreamToken,
UserID,
get_domain_from_id,
get_localpart_from_id,
@@ -29,6 +38,7 @@ from synapse.types import (
)
from tests import unittest
from tests.utils import USE_POSTGRES_FOR_TESTS
class IsMineIDTests(unittest.HomeserverTestCase):
@@ -127,3 +137,64 @@ class MapUsernameTestCase(unittest.TestCase):
# this should work with either a unicode string or a bytes object
self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast")
self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast")
@parameterized_class(
("token_type",),
[
(MultiWriterStreamToken,),
(RoomStreamToken,),
],
class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}",
)
class MultiWriterTokenTestCase(unittest.HomeserverTestCase):
"""Tests for the different types of multi writer tokens."""
token_type: Type[AbstractMultiWriterStreamToken]
def test_basic_token(self) -> None:
"""Test that a simple stream token can be serialized and unserialized"""
store = self.hs.get_datastores().main
token = self.token_type(stream=5)
string_token = self.get_success(token.to_string(store))
if isinstance(token, RoomStreamToken):
self.assertEqual(string_token, "s5")
else:
self.assertEqual(string_token, "5")
parsed_token = self.get_success(self.token_type.parse(store, string_token))
self.assertEqual(parsed_token, token)
@skipUnless(USE_POSTGRES_FOR_TESTS, "Requires Postgres")
def test_instance_map(self) -> None:
"""Test for stream token with instance map"""
store = self.hs.get_datastores().main
token = self.token_type(stream=5, instance_map=immutabledict({"foo": 6}))
string_token = self.get_success(token.to_string(store))
self.assertEqual(string_token, "m5~1.6")
parsed_token = self.get_success(self.token_type.parse(store, string_token))
self.assertEqual(parsed_token, token)
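A hedged sketch (not Synapse's real serializer) of the string formats these two tests pin down. Note that instance *names* are first mapped to integer ids by the store, which is why `{"foo": 6}` renders as `1.6` above.

```python
from typing import Dict

def render_token(
    stream: int, instance_ids_to_pos: Dict[int, int], room: bool
) -> str:
    if instance_ids_to_pos:
        # Multi-writer form: "m{min_stream}~{instance_id}.{pos}" per writer.
        entries = "~".join(
            f"{instance_id}.{pos}"
            for instance_id, pos in sorted(instance_ids_to_pos.items())
        )
        return f"m{stream}~{entries}"
    # RoomStreamToken prefixes "s"; a plain MultiWriterStreamToken does not.
    return f"s{stream}" if room else str(stream)

# render_token(5, {}, room=True)      == "s5"
# render_token(5, {}, room=False)     == "5"
# render_token(5, {1: 6}, room=False) == "m5~1.6"
```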
def test_instance_map_assertion(self) -> None:
"""Test that we assert values in the instance map are greater than the
min stream position"""
with self.assertRaises(ValueError):
self.token_type(stream=5, instance_map=immutabledict({"foo": 4}))
with self.assertRaises(ValueError):
self.token_type(stream=5, instance_map=immutabledict({"foo": 5}))
def test_parse_bad_token(self) -> None:
"""Test that we can parse tokens produced by a bug in Synapse of the
form `m5~`"""
store = self.hs.get_datastores().main
parsed_token = self.get_success(self.token_type.parse(store, "m5~"))
self.assertEqual(parsed_token, self.token_type(stream=5))
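The tolerant parsing rule the test requires, as a hedged sketch: the stream position is whatever precedes the first `~`, so the buggy legacy form `m5~` (an empty instance map) still yields 5.

```python
def parse_stream_part(token: str) -> int:
    # Illustration only: strip a leading "m"/"s" marker, then take
    # everything before the first "~".
    body = token[1:] if token[:1] in ("m", "s") else token
    return int(body.split("~", 1)[0])

assert parse_stream_part("m5~") == 5
assert parse_stream_part("s5") == 5
```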

View File

@@ -28,6 +28,7 @@ import logging
import secrets
import time
from typing import (
AbstractSet,
Any,
Awaitable,
Callable,
@@ -269,6 +270,56 @@ class TestCase(unittest.TestCase):
required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
)
def assertIncludes(
self,
actual_items: AbstractSet[str],
expected_items: AbstractSet[str],
exact: bool = False,
message: Optional[str] = None,
) -> None:
"""
Assert that all of the `expected_items` are included in the `actual_items`.
This assert could also have been called `assertContains` or `assertItemsInSet`.
Args:
actual_items: The container
expected_items: The items to check for in the container
exact: Whether the actual items must be exactly equal to the expected
items (no extras).
message: Optional message to include in the failure message.
"""
# Check that each set has the same items
if exact and actual_items == expected_items:
return
# Check for a superset
elif not exact and actual_items >= expected_items:
return
expected_lines: List[str] = []
for expected_item in expected_items:
is_expected_in_actual = expected_item in actual_items
expected_lines.append(
"{} {}".format(" " if is_expected_in_actual else "?", expected_item)
)
actual_lines: List[str] = []
for actual_item in actual_items:
is_actual_in_expected = actual_item in expected_items
actual_lines.append(
"{} {}".format("+" if is_actual_in_expected else " ", actual_item)
)
newline = "\n"
expected_string = f"Expected items to be in actual ('?' = missing expected items):\n {{\n{newline.join(expected_lines)}\n }}"
actual_string = f"Actual ('+' = found expected items):\n {{\n{newline.join(actual_lines)}\n }}"
first_message = (
"Items must match exactly" if exact else "Some expected items are missing."
)
diff_message = f"{first_message}\n{expected_string}\n{actual_string}"
self.fail(f"{diff_message}\n{message}")
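Hypothetical usage inside a `HomeserverTestCase`, to show the two modes the helper supports:

```python
# Superset check (default): extras in `actual_items` are allowed.
self.assertIncludes({"a", "b", "c"}, {"a", "b"})
# Exact check: the two sets must be equal, no extras.
self.assertIncludes({"a", "b"}, {"a", "b"}, exact=True)
# A failure prints the "?"/"+" diff built above, plus the custom message.
self.assertIncludes({"a"}, {"a", "b"}, message="where is b?")
```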
def DEBUG(target: TV) -> TV:
"""A decorator to set the .loglevel attribute to logging.DEBUG.

View File

@@ -21,6 +21,7 @@
from contextlib import contextmanager
from os import PathLike
from pathlib import Path
from typing import Generator, Optional, Union
from unittest.mock import patch
@@ -41,7 +42,7 @@ class DummyDistribution(metadata.Distribution):
def version(self) -> str:
return self._version
def locate_file(self, path: Union[str, PathLike]) -> PathLike:
def locate_file(self, path: Union[str, PathLike]) -> Path:
raise NotImplementedError()
def read_text(self, filename: str) -> None:

View File

@@ -249,5 +249,5 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3)
self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4)
# Unknown entities will return the stream start position.
self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1)
# Unknown entities will return None
self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)