Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: ts/spam-er...anoa/docs_ (1 commit, 793a5bfd12)

.ci/scripts/record_available_doc_versions.py (new executable file, +28)
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# This script will write a json file to $OUTPUT_FILE that contains the name of
# each available Synapse version with documentation.
#
# This script assumes that any top-level directory in the "gh-pages" branch is
# named after a documentation version and contains documentation website files.

import os.path
import json

OUTPUT_FILE = "versions.json"

# Determine the list of Synapse versions that have documentation.
doc_versions = []
for filepath in os.listdir():
    if os.path.isdir(filepath):
        doc_versions.append(filepath)

# Record the documentation versions in a json file, such that the
# frontend javascript is aware of what versions exist.
to_write = {
    "versions": doc_versions,
    "default_version": "latest",
}

# Write the file.
with open(OUTPUT_FILE, "w") as f:
    f.write(json.dumps(to_write))
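For illustration, here is a minimal, self-contained sketch of what the script above produces when run from a gh-pages checkout. The directory names are hypothetical, not part of the commit:

```python
import json
import os
import tempfile

# Hypothetical per-version doc directories; the real gh-pages branch would
# contain one directory per published Synapse documentation version.
with tempfile.TemporaryDirectory() as checkout:
    for version in ("latest", "v1.58", "v1.59"):
        os.mkdir(os.path.join(checkout, version))

    os.chdir(checkout)
    doc_versions = [p for p in os.listdir() if os.path.isdir(p)]
    with open("versions.json", "w") as f:
        json.dump({"versions": doc_versions, "default_version": "latest"}, f)

    # Prints something like (listing order is not guaranteed):
    # {"versions": ["latest", "v1.58", "v1.59"], "default_version": "latest"}
    print(open("versions.json").read())
```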
.github/workflows/docker.yml (vendored, 30 lines changed)
@@ -34,24 +34,32 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

+      # TODO: consider using https://github.com/docker/metadata-action instead of this
+      # custom magic
       - name: Calculate docker image tag
         id: set-tag
-        uses: docker/metadata-action@master
-        with:
-          images: matrixdotorg/synapse
-          flavor: |
-            latest=false
-          tags: |
-            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
-            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
-            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
-            type=pep440,pattern={{raw}}
+        run: |
+          case "${GITHUB_REF}" in
+            refs/heads/develop)
+              tag=develop
+              ;;
+            refs/heads/master|refs/heads/main)
+              tag=latest
+              ;;
+            refs/tags/*)
+              tag=${GITHUB_REF#refs/tags/}
+              ;;
+            *)
+              tag=${GITHUB_SHA}
+              ;;
+          esac
+          echo "::set-output name=tag::$tag"

       - name: Build and push all platforms
         uses: docker/build-push-action@v2
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
-          tags: "${{ steps.set-tag.outputs.tags }}"
+          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
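Both tagging approaches are intended to produce the same image tags for the common refs. As a hedged sanity check, here is a Python rendition of the shell `case` block above; the function name is ours, not part of the workflow:

```python
def calculate_docker_tag(github_ref: str, github_sha: str) -> str:
    """Mirror of the shell `case` statement in the workflow above."""
    if github_ref == "refs/heads/develop":
        return "develop"
    if github_ref in ("refs/heads/master", "refs/heads/main"):
        return "latest"
    if github_ref.startswith("refs/tags/"):
        # Strip the "refs/tags/" prefix, like ${GITHUB_REF#refs/tags/}.
        return github_ref[len("refs/tags/"):]
    # Fall back to the commit SHA for any other ref.
    return github_sha

# e.g. a release tag yields an image named matrixdotorg/synapse:v1.58.0
assert calculate_docker_tag("refs/tags/v1.58.0", "0" * 40) == "v1.58.0"
assert calculate_docker_tag("refs/heads/develop", "0" * 40) == "develop"
```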
.github/workflows/docs.yaml (vendored, 28 lines changed)
@@ -14,7 +14,7 @@ on:

 jobs:
   pages:
-    name: GitHub Pages
+    name: Build and deploy docs
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
@@ -63,3 +63,29 @@ jobs:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./book
           destination_dir: ./${{ steps.vars.outputs.branch-version }}
+
+  list_available_versions:
+    needs: pages
+    runs-on: ubuntu-latest
+    steps:
+      # Check out the current branch
+      - uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+
+      - name: Save the script
+        run: cp .ci/scripts/record_available_doc_versions.py /
+
+      - uses: actions/setup-python@v3
+
+      # Check out the gh-pages branch, which we'll be pushing the doc versions to
+      - uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+          # Check out the gh-pages branch
+          ref: 'gh-pages'
+
+      - name: Record the available documentation versions
+        run: |
+          # Run the script saved from the first checkout
+          /record_available_doc_versions.py
CHANGES.md (115 lines changed)
@@ -1,116 +1,7 @@
-Synapse 1.59.0rc1 (2022-05-10)
-==============================
+Synapse 1.59.0
+==============
 
-This release makes several changes that server administrators should be aware of:
-
-- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
-- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654))
-
-See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.
-
-Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
-
-Features
---------
-
-- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
-- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
-- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
-- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
-- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654))
-- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
-- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601))
-- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273))
-- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544))
-- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570))
-- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580))
-- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594))
-- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633))
-- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657))
-- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656))
-
-
-Updates to the Docker image
----------------------------
-
-- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541))
-- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573))
-
-
-Improved Documentation
-----------------------
-
-- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536))
-- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579))
-- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621))
-- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627))
-- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
-- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
-  [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
-- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
-
-
-Internal Changes
-----------------
-
-- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480))
-- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500))
-- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505))
-- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564))
-- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568))
-- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577))
-- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581))
-- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582))
-- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663))
-- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589))
-- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599))
-- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602))
-- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610))
-- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614))
-- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
-- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620))
-- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624))
-- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632))
-- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637))
-- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))
-- Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665))
-- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556))
-- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612))
-
-### Typechecking
-
-- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356))
-- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485))
-- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531))
-- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576))
-- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608))
-- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650))
-- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666))
-- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667))
-- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671))
-
-Synapse 1.58.1 (2022-05-05)
-===========================
-
-This patch release includes a fix to the Debian packages, installing the
-`systemd` and `cache_memory` extra package groups, which were incorrectly
-omitted in v1.58.0. This primarily prevented Synapse from starting
-when the `systemd.journal.JournalHandler` log handler was configured.
-See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information.
-
-Otherwise, no significant changes since 1.58.0.
+The non-standard `m.login.jwt` login type has been removed from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration.
 
 
 Synapse 1.58.0 (2022-05-03)
changelog.d/12273.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.48.0 where latest thread reply provided failed to include the proper bundled aggregations.

changelog.d/12356.misc (new file)
@@ -0,0 +1 @@
+Fix scripts-dev to pass typechecking.

changelog.d/12406.feature (new file)
@@ -0,0 +1 @@
+Add a module API to allow modules to change actions for existing push rules of local users.

changelog.d/12480.misc (new file)
@@ -0,0 +1 @@
+Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time.

changelog.d/12505.misc (new file)
@@ -0,0 +1 @@
+Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests.

changelog.d/12526.feature (new file)
@@ -0,0 +1 @@
+Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid.

changelog.d/12531.misc (new file)
@@ -0,0 +1 @@
+Remove unused `# type: ignore`s.

changelog.d/12541.docker (new file)
@@ -0,0 +1 @@
+Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments.

changelog.d/12544.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper.

changelog.d/12556.misc (new file)
@@ -0,0 +1 @@
+Release script: confirm the commit to be tagged before tagging.

changelog.d/12564.misc (new file)
@@ -0,0 +1 @@
+Consistently check if an object is a `frozendict`.

changelog.d/12570.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation.

changelog.d/12576.misc (new file)
@@ -0,0 +1 @@
+Allow unused `#type: ignore` comments in bleeding edge CI jobs.

changelog.d/12579.doc (new file)
@@ -0,0 +1 @@
+Add missing linebreak to pipx install instructions.

changelog.d/12580.bugfix (new file)
@@ -0,0 +1 @@
+Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing.

changelog.d/12581.misc (new file)
@@ -0,0 +1 @@
+Improve docstrings for the receipts store.

changelog.d/12582.misc (new file)
@@ -0,0 +1 @@
+Use constants for read-receipts in tests.

(deleted changelog file)
@@ -1 +0,0 @@
-Add ability to cancel disconnected requests to `SynapseRequest`.

changelog.d/12589.misc (new file)
@@ -0,0 +1 @@
+Remove special-case for `twisted` logger from default log config.

changelog.d/12594.bugfix (new file)
@@ -0,0 +1 @@
+Fix race when persisting an event and deleting a room that could lead to outbound federation breaking.

changelog.d/12596.removal (new file)
@@ -0,0 +1 @@
+Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069).

changelog.d/12597.removal (new file)
@@ -0,0 +1,2 @@
+Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
+[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778).

changelog.d/12608.misc (new file)
@@ -0,0 +1 @@
+Remove redundant lines of config from `mypy.ini`.

changelog.d/12612.bugfix (new file)
@@ -0,0 +1 @@
+Fix a typo in the announcement text generated by the Synapse release development script.

changelog.d/12613.removal (new file)
@@ -0,0 +1 @@
+Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk.

changelog.d/12614.misc (new file)
@@ -0,0 +1 @@
+Add extra debug logging to federation sender.

changelog.d/12620.misc (new file)
@@ -0,0 +1 @@
+Add a consistency check on events which we read from the database.

changelog.d/12624.misc (new file)
@@ -0,0 +1 @@
+Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh.

changelog.d/12627.doc (new file)
@@ -0,0 +1 @@
+Fixes to the formatting of README.rst.

(deleted changelog file)
@@ -1 +0,0 @@
-Add a helper class for testing request cancellation.

(deleted changelog file)
@@ -1 +0,0 @@
-Improve documentation of the `synapse.push` module.

(deleted changelog file)
@@ -1 +0,0 @@
-Refactor functions to on `PushRuleEvaluatorForEvent`.

(deleted changelog file)
@@ -1 +0,0 @@
-Preparation for database schema simplifications: stop writing to `event_reference_hashes`.

(deleted changelog file)
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room.

(deleted changelog file)
@@ -1 +0,0 @@
-Refactor `EventContext` class.

(deleted changelog file)
@@ -1 +0,0 @@
-Capture the `Deferred` for request cancellation in `_AsyncResource`.

(deleted changelog file)
@@ -1 +0,0 @@
-Fixes an incorrect type hint for `Filter._check_event_relations`.
(Grafana dashboard JSON; file header not captured in this view)
@@ -66,18 +66,6 @@
       ],
       "title": "Dashboards",
       "type": "dashboards"
     },
-    {
-      "asDropdown": false,
-      "icon": "external link",
-      "includeVars": false,
-      "keepTime": false,
-      "tags": [],
-      "targetBlank": true,
-      "title": "Synapse Documentation",
-      "tooltip": "Open Documentation",
-      "type": "link",
-      "url": "https://matrix-org.github.io/synapse/latest/"
-    }
   ],
   "panels": [
@@ -10901,4 +10889,4 @@
   "title": "Synapse",
   "uid": "000000012",
   "version": 100
 }
}
debian/build_virtualenv (vendored, 6 lines changed)
@@ -37,11 +37,7 @@ python3 -m venv "$TEMP_VENV"
 source "$TEMP_VENV/bin/activate"
 pip install -U pip
 pip install poetry==1.2.0b1
-poetry export \
-    --extras all \
-    --extras test \
-    --extras systemd \
-    -o exported_requirements.txt
+poetry export --extras all --extras test -o exported_requirements.txt
 deactivate
 rm -rf "$TEMP_VENV"
debian/changelog (vendored, 17 lines changed)
@@ -1,20 +1,3 @@
-matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
-
-  * Adjust how the `exported-requirements.txt` file is generated as part of
-    the process of building these packages. This affects the package
-    maintainers only; end-users are unaffected.
-  * New Synapse release 1.59.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 10 May 2022 10:45:08 +0100
-
-matrix-synapse-py3 (1.58.1) stable; urgency=medium
-
-  * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
-    were incorrectly omitted from the 1.58.0 package.
-  * New Synapse release 1.58.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 05 May 2022 14:58:23 +0100
-
 matrix-synapse-py3 (1.58.0) stable; urgency=medium
 
   * New Synapse release 1.58.0.
(Complement supervisord configuration; file header not captured in this view)
@@ -9,7 +9,7 @@ user=root
 files = /etc/supervisor/conf.d/*.conf

 [program:nginx]
-command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;"
+command=/usr/sbin/nginx -g "daemon off;"
 priority=500
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -19,7 +19,7 @@ username=www-data
 autorestart=true

 [program:redis]
-command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -29,7 +29,7 @@ username=redis
 autorestart=true

 [program:synapse_main]
-command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
+command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
 priority=10
 # Log startup failures to supervisord's stdout/err
 # Regular synapse logs will still go in the configured data directory
(Complement worker configuration script; file header not captured in this view)
@@ -69,10 +69,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
-        "app": "synapse.app.generic_worker",
+        "app": "synapse.app.appservice",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
+        "shared_extra_conf": {"notify_appservices": False},
         "worker_extra_conf": "",
     },
     "federation_sender": {
(Complement testing documentation; file header not captured in this view)
@@ -270,13 +270,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
 To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:

 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
 ```

 To run a specific test, you can specify the whole name structure:

 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
 ```
(module spam-checker callback documentation; file header not captured in this view)
@@ -18,17 +18,6 @@ async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool,

 Called when receiving an event from a client or via federation. The callback must return
 either:
-  - on `Decision.ALLOW`, the action is permitted.
-  - on `Decision.DENY`, the action is rejected with a default error message/code.
-  - on `Codes`, the action is rejected with a specific error message/code. In case
-    of doubt, use `Codes.FORBIDDEN`.
-  - (deprecated) on `False`, behave as `Decision.ALLOW`. Deprecated as methods in
-    this API are inconsistent, some expect `True` for `ALLOW` and others `True`
-    for `DENY`.
-  - (deprecated) on `True`, behave as `Decision.DENY`. Deprecated as methods in
-    this API are inconsistent, some expect `True` for `ALLOW` and others `True`
-    for `DENY`.
-
+- an error message string, to indicate the event must be rejected because of spam and
+  give a rejection reason to forward to clients;
+- the boolean `True`, to indicate that the event is spammy, but not provide further details; or
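As a hedged illustration of the error-string/`True` contract described above (the event body access is an assumption for the example, not taken from the module API docs), a minimal callback might look like:

```python
from typing import Union

# Minimal sketch of a spam-checker callback following the documented contract:
# return a string to reject with a reason forwarded to clients, True to
# reject without details, or False to allow the event.
async def check_event_for_spam(event) -> Union[bool, str]:
    body = event.content.get("body", "")  # assumed event shape
    if "https://spam.example" in body:
        return "This event was flagged as spam"
    return False
```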
(replication documentation; file header not captured in this view)
@@ -35,8 +35,3 @@ See [the TCP replication documentation](tcp_replication.md).
 There are read-only version of the synapse storage layer in
 `synapse/replication/slave/storage` that use the response of the
 replication API to invalidate their caches.
-
-### The TCP Replication Module
-Information about how the tcp replication module is structured, including how
-the classes interact, can be found in
-`synapse/replication/tcp/__init__.py`
(sample homeserver configuration; file header not captured in this view)
@@ -407,11 +407,6 @@ manhole_settings:
 # sign up in a short space of time never to return after their initial
 # session.
 #
-# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
-# applies a different trial number if the user was registered by an appservice.
-# A value of 0 means no trial days are applied. Appservices not listed in this
-# dictionary use the value of `mau_trial_days` instead.
-#
 # 'mau_limit_alerting' is a means of limiting client side alerting
 # should the mau limit be reached. This is useful for small instances
 # where the admin has 5 mau seats (say) for 5 specific people and no
@@ -422,8 +417,6 @@ manhole_settings:
 #max_mau_value: 50
 #mau_trial_days: 2
 #mau_limit_alerting: false
-#mau_appservice_trial_days:
-#  "appservice-id": 1
 
 # If enabled, the metrics for the number of monthly active users will
 # be populated, however no one will be limited. If limit_usage_by_mau
@@ -716,11 +709,11 @@ retention:
 #
 #allow_profile_lookup_over_federation: false
 
-# Uncomment to allow device display name lookup over federation. By default, the
-# Federation API prevents other homeservers from obtaining the display names of
-# user devices on this homeserver. Defaults to 'false'.
+# Uncomment to disable device display name lookup over federation. By default, the
+# Federation API allows other homeservers to obtain device display names of any user
+# on this homeserver. Defaults to 'true'.
 #
-#allow_device_name_lookup_over_federation: true
+#allow_device_name_lookup_over_federation: false
 
 
 ## Caching ##
(upgrade notes; file header not captured in this view)
@@ -89,50 +89,6 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     ```
 
-# Upgrading to v1.59.0
-
-## Device name lookup over federation has been disabled by default
-
-The names of user devices are no longer visible to users on other homeservers by default.
-Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption.
-
-To re-enable this functionality, set the
-[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation)
-homeserver config option to `true`.
-
-
-## Deprecation of the `synapse.app.appservice` and `synapse.app.user_dir` worker application types
-
-The `synapse.app.appservice` worker application type allowed you to configure a
-single worker to use to notify application services of new events, as long
-as this functionality was disabled on the main process with `notify_appservices: False`.
-Further, the `synapse.app.user_dir` worker application type allowed you to configure
-a single worker to be responsible for updating the user directory, as long as this
-was disabled on the main process with `update_user_directory: False`.
-
-To unify Synapse's worker types, the `synapse.app.appservice` worker application
-type and the `notify_appservices` configuration option have been deprecated.
-The `synapse.app.user_dir` worker application type and `update_user_directory`
-configuration option have also been deprecated.
-
-To get the same functionality as was provided by the deprecated options, it's now recommended that the `synapse.app.generic_worker`
-worker application type is used and that the `notify_appservices_from_worker` and/or
-`update_user_directory_from_worker` options are set to the name of a worker.
-
-For the time being, the old options can be used alongside the new options to make
-it easier to transition between the two configurations, however please note that:
-
-- the options must not contradict each other (otherwise Synapse won't start); and
-- the `notify_appservices` and `update_user_directory` options will be removed in a future release of Synapse.
-
-Please see the [*Notifying Application Services*][v1_59_notify_ases_from] and
-[*Updating the User Directory*][v1_59_update_user_dir] sections of the worker
-documentation for more information.
-
-[v1_59_notify_ases_from]: workers.md#notifying-application-services
-[v1_59_update_user_dir]: workers.md#updating-the-user-directory
-
-
 # Upgrading to v1.58.0
 
 ## Groups/communities feature has been disabled by default
@@ -140,7 +96,6 @@ documentation for more information.
 The non-standard groups/communities feature in Synapse has been disabled by default
 and will be removed in Synapse v1.61.0.
 
-
 # Upgrading to v1.57.0
 
 ## Changes to database schema for application services
(useful SQL queries for Synapse admins; file header not captured in this view)
@@ -1,10 +1,7 @@
 ## Some useful SQL queries for Synapse Admins
 
 ## Size of full matrix db
-```sql
-SELECT pg_size_pretty( pg_database_size( 'matrix' ) );
-```
-
+`SELECT pg_size_pretty( pg_database_size( 'matrix' ) );`
 ### Result example:
 ```
 pg_size_pretty
@@ -12,19 +9,39 @@ pg_size_pretty
 6420 MB
 (1 row)
 ```
+## Show top 20 larger rooms by state events count
+```sql
+SELECT r.name, s.room_id, s.current_state_events
+FROM room_stats_current s
+LEFT JOIN room_stats_state r USING (room_id)
+ORDER BY current_state_events DESC
+LIMIT 20;
+```
+
+and by state_group_events count:
+```sql
+SELECT rss.name, s.room_id, count(s.room_id) FROM state_groups_state s
+LEFT JOIN room_stats_state rss USING (room_id)
+GROUP BY s.room_id, rss.name
+ORDER BY count(s.room_id) DESC
+LIMIT 20;
+```
+plus same, but with join removed for performance reasons:
+```sql
+SELECT s.room_id, count(s.room_id) FROM state_groups_state s
+GROUP BY s.room_id
+ORDER BY count(s.room_id) DESC
+LIMIT 20;
+```
 
 ## Show top 20 larger tables by row count
 ```sql
-SELECT relname, n_live_tup AS "rows"
-FROM pg_stat_user_tables
+SELECT relname, n_live_tup as rows
+FROM pg_stat_user_tables
 ORDER BY n_live_tup DESC
 LIMIT 20;
 ```
-This query is quick, but may be very approximate, for exact number of rows use:
-```sql
-SELECT COUNT(*) FROM <table_name>;
-```
+This query is quick, but may be very approximate, for exact number of rows use `SELECT COUNT(*) FROM <table_name>`.
 ### Result example:
 ```
 state_groups_state - 161687170
@@ -49,19 +66,46 @@ device_lists_stream - 326903
 user_directory_search - 316433
 ```
 
+## Show top 20 rooms by new events count in last 1 day:
+```sql
+SELECT e.room_id, r.name, COUNT(e.event_id) cnt FROM events e
+LEFT JOIN room_stats_state r USING (room_id)
+WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000 GROUP BY e.room_id, r.name ORDER BY cnt DESC LIMIT 20;
+```
+
+## Show top 20 users on homeserver by sent events (messages) at last month:
+```sql
+SELECT user_id, SUM(total_events)
+FROM user_stats_historical
+WHERE TO_TIMESTAMP(end_ts/1000) AT TIME ZONE 'UTC' > date_trunc('day', now() - interval '1 month')
+GROUP BY user_id
+ORDER BY SUM(total_events) DESC
+LIMIT 20;
+```
+
+## Show last 100 messages from needed user, with room names:
+```sql
+SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json FROM events e
+LEFT JOIN event_json j USING (room_id)
+LEFT JOIN room_stats_state r USING (room_id)
+WHERE sender = '@LOGIN:example.com'
+  AND e.type = 'm.room.message'
+ORDER BY stream_ordering DESC
+LIMIT 100;
+```
 
 ## Show top 20 larger tables by storage size
 ```sql
 SELECT nspname || '.' || relname AS "relation",
-    pg_size_pretty(pg_total_relation_size(c.oid)) AS "total_size"
-  FROM pg_class c
-  LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
+    pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
+  FROM pg_class C
+  LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
   WHERE nspname NOT IN ('pg_catalog', 'information_schema')
-    AND c.relkind <> 'i'
+    AND C.relkind <> 'i'
     AND nspname !~ '^pg_toast'
-  ORDER BY pg_total_relation_size(c.oid) DESC
+  ORDER BY pg_total_relation_size(C.oid) DESC
   LIMIT 20;
 ```
 
 ### Result example:
 ```
 public.state_groups_state - 27 GB
@@ -86,93 +130,8 @@ public.device_lists_remote_cache - 124 MB
 public.state_group_edges - 122 MB
 ```
 
-## Show top 20 larger rooms by state events count
-You get the same information when you use the
-[admin API](../../admin_api/rooms.md#list-room-api)
-and set parameter `order_by=state_events`.
-
-```sql
-SELECT r.name, s.room_id, s.current_state_events
-FROM room_stats_current s
-LEFT JOIN room_stats_state r USING (room_id)
-ORDER BY current_state_events DESC
-LIMIT 20;
-```
-
-and by state_group_events count:
-```sql
-SELECT rss.name, s.room_id, COUNT(s.room_id)
-FROM state_groups_state s
-LEFT JOIN room_stats_state rss USING (room_id)
-GROUP BY s.room_id, rss.name
-ORDER BY COUNT(s.room_id) DESC
-LIMIT 20;
-```
-
-plus same, but with join removed for performance reasons:
-```sql
-SELECT s.room_id, COUNT(s.room_id)
-FROM state_groups_state s
-GROUP BY s.room_id
-ORDER BY COUNT(s.room_id) DESC
-LIMIT 20;
-```
-
-## Show top 20 rooms by new events count in last 1 day:
-```sql
-SELECT e.room_id, r.name, COUNT(e.event_id) cnt
-FROM events e
-LEFT JOIN room_stats_state r USING (room_id)
-WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000
-GROUP BY e.room_id, r.name
-ORDER BY cnt DESC
-LIMIT 20;
-```
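Several of the queries in this file filter Synapse's millisecond timestamp columns (`origin_server_ts`, `last_seen`) against an epoch cutoff computed as `DATE_PART('epoch', NOW() - INTERVAL '...') * 1000`. For readers checking their own values, here is a small Python sketch of the same cutoff arithmetic (illustrative only):

```python
import time

# Same arithmetic as DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000:
# seconds since the Unix epoch, shifted back one day, scaled to milliseconds.
ONE_DAY_SECONDS = 24 * 60 * 60

cutoff_ms = int((time.time() - ONE_DAY_SECONDS) * 1000)

# Rows with origin_server_ts >= cutoff_ms are "events in the last 1 day".
print(cutoff_ms)
```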
-## Show top 20 users on homeserver by sent events (messages) at last month:
-Caution. This query does not use any indexes, can be slow and create load on the database.
-```sql
-SELECT COUNT(*), sender
-FROM events
-WHERE (type = 'm.room.encrypted' OR type = 'm.room.message')
-    AND origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 month') * 1000
-GROUP BY sender
-ORDER BY COUNT(*) DESC
-LIMIT 20;
-```
-
-## Show last 100 messages from needed user, with room names:
-```sql
-SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json
-FROM events e
-LEFT JOIN event_json j USING (room_id)
-LEFT JOIN room_stats_state r USING (room_id)
-WHERE sender = '@LOGIN:example.com'
-    AND e.type = 'm.room.message'
-ORDER BY stream_ordering DESC
-LIMIT 100;
-```
-
-## Show rooms with names, sorted by events in this rooms
-
-**Sort and order with bash**
-```bash
-echo "SELECT event_json.room_id, room_stats_state.name FROM event_json, room_stats_state \
-WHERE room_stats_state.room_id = event_json.room_id" | psql -d synapse -h localhost -U synapse_user -t \
-| sort | uniq -c | sort -n
-```
-Documentation for `psql` command line parameters: https://www.postgresql.org/docs/current/app-psql.html
-
-**Sort and order with SQL**
-```sql
-SELECT COUNT(*), event_json.room_id, room_stats_state.name
-FROM event_json, room_stats_state
-WHERE room_stats_state.room_id = event_json.room_id
-GROUP BY event_json.room_id, room_stats_state.name
-ORDER BY COUNT(*) DESC
-LIMIT 50;
-```
-
+`echo "select event_json.room_id,room_stats_state.name from event_json,room_stats_state where room_stats_state.room_id=event_json.room_id" | psql synapse | sort | uniq -c | sort -n`
 ### Result example:
 ```
 9459  !FPUfgzXYWTKgIrwKxW:matrix.org | This Week in Matrix
@@ -186,22 +145,12 @@ SELECT COUNT(*), event_json.room_id, room_stats_state.name
 ```
 
 ## Lookup room state info by list of room_id
-You get the same information when you use the
-[admin API](../../admin_api/rooms.md#room-details-api).
 ```sql
-SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
-    rsc.joined_members, rsc.local_users_in_room, rss.join_rules
+SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules
 FROM room_stats_state rss
 LEFT JOIN room_stats_current rsc USING (room_id)
 WHERE room_id IN (
     '!OGEhHVWSdvArJzumhm:matrix.org',
     '!YTvKGNlinIzlkMTVRl:matrix.org'
-);
+)
 ```
-
-## Show users and devices that have not been online for a while
-```sql
-SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_seen"
-FROM devices
-WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
-```
(configuration manual; file header not captured in this view)
@@ -627,20 +627,6 @@ Example configuration:
 mau_trial_days: 5
 ```
 ---
-Config option: `mau_appservice_trial_days`
-
-The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different
-trial number if the user was registered by an appservice. A value
-of 0 means no trial days are applied. Appservices not listed in this dictionary
-use the value of `mau_trial_days` instead.
-
-Example configuration:
-```yaml
-mau_appservice_trial_days:
-  my_appservice_id: 3
-  another_appservice_id: 6
-```
----
 Config option: `mau_limit_alerting`
 
 The option `mau_limit_alerting` is a means of limiting client-side alerting
@@ -1049,13 +1035,13 @@ allow_profile_lookup_over_federation: false
 ---
 Config option: `allow_device_name_lookup_over_federation`
 
-Set this option to true to allow device display name lookup over federation. By default, the
-Federation API prevents other homeservers from obtaining the display names of any user devices
+Set this option to false to disable device display name lookup over federation. By default, the
+Federation API allows other homeservers to obtain device display names of any user
 on this homeserver.
 
 Example configuration:
 ```yaml
-allow_device_name_lookup_over_federation: true
+allow_device_name_lookup_over_federation: false
 ```
 ---
 ## Caching ##
(worker documentation; file header not captured in this view)
@@ -426,7 +426,7 @@ the shared configuration would include:
 run_background_tasks_on: background_worker
 ```
 
-You might also wish to investigate the `update_user_directory_from_worker` and
+You might also wish to investigate the `update_user_directory` and
 `media_instance_running_background_jobs` settings.
 
 An example for a dedicated background worker instance:
@@ -435,40 +435,6 @@ An example for a dedicated background worker instance:
 {{#include systemd-with-workers/workers/background_worker.yaml}}
 ```
 
-#### Updating the User Directory
-
-You can designate one generic worker to update the user directory.
-
-Specify its name in the shared configuration as follows:
-
-```yaml
-update_user_directory_from_worker: worker_name
-```
-
-This work cannot be load-balanced; please ensure the main process is restarted
-after setting this option in the shared configuration!
-
-This style of configuration supersedes the legacy `synapse.app.user_dir`
-worker application type.
-
-
-#### Notifying Application Services
-
-You can designate one generic worker to send output traffic to Application Services.
-
-Specify its name in the shared configuration as follows:
-
-```yaml
-notify_appservices_from_worker: worker_name
-```
-
-This work cannot be load-balanced; please ensure the main process is restarted
-after setting this option in the shared configuration!
-
-This style of configuration supersedes the legacy `synapse.app.appservice`
-worker application type.
-
-
 ### `synapse.app.pusher`
 
 Handles sending push notifications to sygnal and email. Doesn't handle any
@@ -487,9 +453,6 @@ pusher_instances:
 
 ### `synapse.app.appservice`
 
-**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
-`notify_appservices_from_worker` option instead.](#notifying-application-services)
-
 Handles sending output traffic to Application Services. Doesn't handle any
 REST endpoints itself, but you should set `notify_appservices: False` in the
 shared configuration file to stop the main synapse sending appservice notifications.
@@ -557,9 +520,6 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for
 
 ### `synapse.app.user_dir`
 
-**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
-`update_user_directory_from_worker` option instead.](#updating-the-user-directory)
-
 Handles searches in the user directory. It can handle REST endpoints matching
 the following regular expressions:
72
poetry.lock
generated
72
poetry.lock
generated
@@ -572,7 +572,7 @@ python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "mypy"
|
||||
version = "0.950"
|
||||
version = "0.931"
|
||||
description = "Optional static typing for Python"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -580,14 +580,13 @@ python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
mypy-extensions = ">=0.4.3"
|
||||
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
|
||||
tomli = ">=1.1.0"
|
||||
typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
|
||||
typing-extensions = ">=3.10"
|
||||
|
||||
[package.extras]
|
||||
dmypy = ["psutil (>=4.0)"]
|
||||
python2 = ["typed-ast (>=1.4.0,<2)"]
|
||||
reports = ["lxml"]
|
||||
|
||||
[[package]]
|
||||
name = "mypy-extensions"
|
||||
@@ -599,14 +598,14 @@ python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "mypy-zope"
|
||||
version = "0.3.7"
|
||||
version = "0.3.5"
|
||||
description = "Plugin for mypy to support zope interfaces"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.dependencies]
|
||||
mypy = "0.950"
|
||||
mypy = "0.931"
|
||||
"zope.interface" = "*"
|
||||
"zope.schema" = "*"
|
||||
|
||||
@@ -1021,7 +1020,7 @@ jeepney = ">=0.6"
|
||||
|
||||
[[package]]
|
||||
name = "sentry-sdk"
|
||||
version = "1.5.11"
|
||||
version = "1.5.7"
|
||||
description = "Python client for Sentry (https://sentry.io)"
|
||||
category = "main"
|
||||
optional = true
|
||||
@@ -1371,7 +1370,7 @@ python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "types-pillow"
|
||||
version = "9.0.15"
|
||||
version = "9.0.6"
|
||||
description = "Typing stubs for Pillow"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -1546,7 +1545,7 @@ docs = ["sphinx", "repoze.sphinx.autointerface"]
|
||||
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
|
||||
|
||||
[extras]
|
||||
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis", "Pympler"]
|
||||
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis"]
|
||||
cache_memory = ["Pympler"]
|
||||
jwt = ["pyjwt"]
|
||||
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
|
||||
@@ -1563,7 +1562,7 @@ url_preview = ["lxml"]
|
||||
[metadata]
|
||||
lock-version = "1.1"
|
||||
python-versions = "^3.7.1"
|
||||
content-hash = "d39d5ac5d51c014581186b7691999b861058b569084c525523baf70b77f292b1"
|
||||
content-hash = "2bda1a7cfc8cc02832b4a7d16bf7e1615cb05e0639bdb30688aadf692d851942"
|
||||
|
||||
[metadata.files]
|
||||
attrs = [
|
||||
@@ -2090,37 +2089,34 @@ msgpack = [
|
||||
{file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
|
||||
]
|
||||
mypy = [
|
||||
{file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
|
||||
{file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
|
||||
{file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
|
||||
{file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
|
||||
{file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
|
||||
{file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
|
||||
{file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
|
||||
{file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
|
||||
{file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
|
||||
{file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
|
||||
{file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
|
||||
{file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
|
||||
{file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
|
||||
{file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
|
||||
{file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
|
||||
{file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
|
||||
{file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
|
||||
{file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
|
||||
{file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
|
||||
{file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
|
||||
{file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
|
||||
{file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
|
||||
{file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
|
||||
{file = "mypy-0.931-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c5b42d0815e15518b1f0990cff7a705805961613e701db60387e6fb663fe78a"},
|
||||
{file = "mypy-0.931-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c89702cac5b302f0c5d33b172d2b55b5df2bede3344a2fbed99ff96bddb2cf00"},
|
||||
{file = "mypy-0.931-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:300717a07ad09525401a508ef5d105e6b56646f7942eb92715a1c8d610149714"},
|
||||
{file = "mypy-0.931-cp310-cp310-win_amd64.whl", hash = "sha256:7b3f6f557ba4afc7f2ce6d3215d5db279bcf120b3cfd0add20a5d4f4abdae5bc"},
|
||||
{file = "mypy-0.931-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1bf752559797c897cdd2c65f7b60c2b6969ffe458417b8d947b8340cc9cec08d"},
|
||||
{file = "mypy-0.931-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4365c60266b95a3f216a3047f1d8e3f895da6c7402e9e1ddfab96393122cc58d"},
|
||||
{file = "mypy-0.931-cp36-cp36m-win_amd64.whl", hash = "sha256:1b65714dc296a7991000b6ee59a35b3f550e0073411ac9d3202f6516621ba66c"},
|
||||
{file = "mypy-0.931-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e839191b8da5b4e5d805f940537efcaa13ea5dd98418f06dc585d2891d228cf0"},
|
||||
{file = "mypy-0.931-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:50c7346a46dc76a4ed88f3277d4959de8a2bd0a0fa47fa87a4cde36fe247ac05"},
|
||||
{file = "mypy-0.931-cp37-cp37m-win_amd64.whl", hash = "sha256:d8f1ff62f7a879c9fe5917b3f9eb93a79b78aad47b533911b853a757223f72e7"},
|
||||
{file = "mypy-0.931-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f9fe20d0872b26c4bba1c1be02c5340de1019530302cf2dcc85c7f9fc3252ae0"},
|
||||
{file = "mypy-0.931-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1b06268df7eb53a8feea99cbfff77a6e2b205e70bf31743e786678ef87ee8069"},
|
||||
{file = "mypy-0.931-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c11003aaeaf7cc2d0f1bc101c1cc9454ec4cc9cb825aef3cafff8a5fdf4c799"},
|
||||
{file = "mypy-0.931-cp38-cp38-win_amd64.whl", hash = "sha256:d9d2b84b2007cea426e327d2483238f040c49405a6bf4074f605f0156c91a47a"},
|
||||
{file = "mypy-0.931-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff3bf387c14c805ab1388185dd22d6b210824e164d4bb324b195ff34e322d166"},
|
||||
{file = "mypy-0.931-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b56154f8c09427bae082b32275a21f500b24d93c88d69a5e82f3978018a0266"},
|
||||
{file = "mypy-0.931-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ca7f8c4b1584d63c9a0f827c37ba7a47226c19a23a753d52e5b5eddb201afcd"},
|
||||
{file = "mypy-0.931-cp39-cp39-win_amd64.whl", hash = "sha256:74f7eccbfd436abe9c352ad9fb65872cc0f1f0a868e9d9c44db0893440f0c697"},
|
||||
{file = "mypy-0.931-py3-none-any.whl", hash = "sha256:1171f2e0859cfff2d366da2c7092b06130f232c636a3f7301e3feb8b41f6377d"},
|
||||
{file = "mypy-0.931.tar.gz", hash = "sha256:0038b21890867793581e4cb0d810829f5fd4441aa75796b53033af3aa30430ce"},
|
||||
]
|
||||
mypy-extensions = [
|
||||
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
|
||||
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
|
||||
]
|
||||
mypy-zope = [
|
||||
{file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
|
||||
{file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
|
||||
{file = "mypy-zope-0.3.5.tar.gz", hash = "sha256:489e7da1c2af887f2cfe3496995fc247f296512b495b57817edddda9d22308f3"},
|
||||
{file = "mypy_zope-0.3.5-py3-none-any.whl", hash = "sha256:3bd0cc9a3e5933b02931af4b214ba32a4f4ff98adb30c979ce733857db91a18b"},
|
||||
]
|
||||
netaddr = [
|
||||
{file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
|
||||
@@ -2390,8 +2386,8 @@ secretstorage = [
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
]
sentry-sdk = [
{file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
{file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
{file = "sentry-sdk-1.5.7.tar.gz", hash = "sha256:aa52da941c56b5a76fd838f8e9e92a850bf893a9eb1e33ffce6c21431d07ee30"},
{file = "sentry_sdk-1.5.7-py2.py3-none-any.whl", hash = "sha256:411a8495bd18cf13038e5749e4710beb4efa53da6351f67b4c2f307c2d9b6d49"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2626,8 +2622,8 @@ types-opentracing = [
{file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"},
]
types-pillow = [
{file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
{file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
{file = "types-Pillow-9.0.6.tar.gz", hash = "sha256:79b350b1188c080c27558429f1e119e69c9f020b877a82df761d9283070e0185"},
{file = "types_Pillow-9.0.6-py3-none-any.whl", hash = "sha256:bd1e0a844fc718398aa265bf50fcad550fc520cc54f80e5ffeb7b3226b3cc507"},
]
types-psycopg2 = [
{file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},

@@ -54,7 +54,7 @@ skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
version = "1.59.0rc1"
version = "1.58.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -142,10 +142,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
typing-extensions = ">=3.10.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -233,11 +231,10 @@ all = [
"jaeger-client", "opentracing",
# jwt
"pyjwt",
# redis
"txredisapi", "hiredis",
# cache_memory
"pympler",
#redis
"txredisapi", "hiredis"
# omitted:
# - cache_memory: this is an experimental option
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
]
@@ -251,8 +248,8 @@ flake8-bugbear = "==21.3.2"
flake8 = "*"

# Typechecking
mypy = "*"
mypy-zope = "*"
mypy = "==0.931"
mypy-zope = "==0.3.5"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"

@@ -43,8 +43,6 @@ fi
# Build the base Synapse image from the local checkout
docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .

extra_test_args=()

# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then
# Build the workers docker image (from the base Synapse image).
@@ -54,14 +52,7 @@ if [[ -n "$WORKERS" ]]; then
COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile

# And provide some more configuration to complement.

# It can take quite a while to spin up a worker-mode Synapse for the first
# time (the main problem is that we start 14 python processes for each test,
# and complement likes to do two of them in parallel).
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120

# ... and it takes longer than 10m to run the whole suite.
extra_test_args+=("-timeout=60m")
export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
else
export COMPLEMENT_BASE_IMAGE=complement-synapse
COMPLEMENT_DOCKERFILE=Dockerfile
@@ -73,4 +64,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "$@" ./tests/...

@@ -85,19 +85,12 @@ class SortedDict(Dict[_KT, _VT]):
def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
# Mypy now reports the first overload as an error, because typeshed widened the type
# of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in
# https://github.com/python/typeshed/pull/6653
# Since sorteddicts don't change the signature of `update` from that of `dict`, we
# let the stubs for `update` inherit from the stubs for `dict`. (I suspect we could
# do the same for many other methods.) We leave the stubs commented to better track
# how this file has evolved from the original stubs.
# @overload
# def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
# @overload
# def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
# @overload
# def update(self, **kwargs: _VT) -> None: ...
@overload
def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
@overload
def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
@overload
def update(self, **kwargs: _VT) -> None: ...
def __reduce__(
self,
) -> Tuple[
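
A self-contained sketch (an assumption about type-checker behaviour, not taken from the stubs) of why omitting `update` works: a subclass stub that defines no `update` of its own falls back to `dict.update`'s typeshed signature, including the widened `SupportsKeysAndGetItem` overload.

# Sketch: a subclass with no `update` override inherits dict's typeshed
# signature for it, which is the behaviour the comment above relies on.
from typing import Dict, Tuple, TypeVar

_KT = TypeVar("_KT")
_VT = TypeVar("_VT")


class MiniSortedDict(Dict[_KT, _VT]):
    # No `update` here: type checkers use dict.update's stubs unchanged.
    def peekitem(self, index: int = -1) -> Tuple[_KT, _VT]:
        key = sorted(self)[index]  # type: ignore[type-var]
        return (key, self[key])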

@@ -187,7 +187,7 @@ class Auth:
Once get_user_by_req has set up the opentracing span, this does the actual work.
"""
try:
ip_addr = request.getClientAddress().host
ip_addr = request.getClientIP()
user_agent = get_request_user_agent(request)

access_token = self.get_access_token_from_request(request)
@@ -356,7 +356,7 @@ class Auth:
return None, None, None

if app_service.ip_range_whitelist:
ip_address = IPAddress(request.getClientAddress().host)
ip_address = IPAddress(request.getClientIP())
if ip_address not in app_service.ip_range_whitelist:
return None, None, None

@@ -417,8 +417,7 @@ class Auth:
"""

if rights == "access":
# First look in the database to see if the access token is present
# as an opaque token.
# first look in the database
r = await self.store.get_user_by_access_token(token)
if r:
valid_until_ms = r.valid_until_ms
@@ -435,8 +434,7 @@ class Auth:

return r

# If the token isn't found in the database, then it could still be a
# macaroon, so we check that here.
# otherwise it needs to be a valid macaroon
try:
user_id, guest = self._parse_and_validate_macaroon(token, rights)

@@ -484,12 +482,8 @@ class Auth:
TypeError,
ValueError,
) as e:
logger.warning(
"Invalid access token in auth: %s %s.",
type(e),
e,
)
raise InvalidClientTokenError("Invalid access token passed.")
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
raise InvalidClientTokenError("Invalid macaroon passed.")

def _parse_and_validate_macaroon(
self, token: str, rights: str = "access"
@@ -510,7 +504,10 @@ class Auth:
try:
macaroon = pymacaroons.Macaroon.deserialize(token)
except Exception: # deserialize can throw more-or-less anything
# The access token doesn't look like a macaroon.
# doesn't look like a macaroon: treat it as an opaque token which
# must be in the database.
# TODO: it would be nice to get rid of this, but apparently some
# people use access tokens which aren't macaroons
raise _InvalidMacaroonException()

try:
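
A condensed, self-contained sketch (hypothetical names, not Synapse's real API) of the lookup order the comments above describe: an opaque token is checked against the database first, and only on a miss do we try to read the token as a macaroon.

# Hypothetical stand-ins throughout; the prefix check stands in for
# pymacaroons.Macaroon.deserialize, which may raise on bad input.
from typing import Dict, Optional


class InvalidClientTokenError(Exception):
    pass


async def validate_access_token(db: Dict[str, str], token: str) -> str:
    # First look in the database for the token as an opaque string.
    user_id: Optional[str] = db.get(token)
    if user_id is not None:
        return user_id

    # Not in the database: it could still be a macaroon.
    if not token.startswith("macaroon:"):
        raise InvalidClientTokenError("Invalid access token passed.")
    return token[len("macaroon:"):]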


@@ -255,5 +255,7 @@ class GuestAccess:

class ReceiptTypes:
READ: Final = "m.read"
READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
FULLY_READ: Final = "m.fully_read"


class ReadReceiptEventFields:
MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden"

@@ -19,7 +19,6 @@ from typing import (
TYPE_CHECKING,
Awaitable,
Callable,
Collection,
Dict,
Iterable,
List,
@@ -445,9 +444,9 @@ class Filter:
return room_ids

async def _check_event_relations(
self, events: Collection[FilterEvent]
self, events: Iterable[FilterEvent]
) -> List[FilterEvent]:
# The event IDs to check, mypy doesn't understand the isinstance check.
# The event IDs to check, mypy doesn't understand the ifinstance check.
event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined]
event_ids_to_keep = set(
await self._store.events_have_relations(

@@ -38,7 +38,6 @@ from typing import (

from cryptography.utils import CryptographyDeprecationWarning
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import ParamSpec

import twisted
from twisted.internet import defer, error, reactor as _reactor
@@ -82,12 +81,11 @@ logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
_sighup_callbacks: List[
Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
] = []
P = ParamSpec("P")


def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None:
"""
Register a function to be called when a SIGHUP occurs.

@@ -95,9 +93,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
func: Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
# This type-ignore should be redundant once we use a mypy release with
# https://github.com/python/mypy/pull/12668.
_sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
_sighup_callbacks.append((func, args, kwargs))


def start_worker_reactor(
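
A standalone sketch of the ParamSpec pattern in the hunk above: binding `*args`/`**kwargs` to the callback's own signature lets mypy reject mismatched registrations at type-check time.

# Self-contained sketch; the registry mirrors the diffed code's shape.
from typing import Callable, Dict, List, Tuple

from typing_extensions import ParamSpec

P = ParamSpec("P")

_callbacks: List[Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]] = []


def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
    # args/kwargs are type-checked against func's own signature.
    _callbacks.append((func, args, kwargs))  # type: ignore[arg-type]


def reload_config(path: str, verbose: bool = False) -> None:
    print("reloading", path, verbose)


register_sighup(reload_config, "/etc/synapse/homeserver.yaml", verbose=True)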
@@ -218,9 +214,7 @@ def redirect_stdio_to_logs() -> None:
print("Redirected stdout/stderr to logs")


def register_start(
cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
) -> None:
def register_start(cb: Callable[..., Awaitable], *args: Any, **kwargs: Any) -> None:
"""Register a callback with the reactor, to be called once it is running

This can be used to initialise parts of the system which require an asynchronous

@@ -210,7 +210,7 @@ def start(config_options: List[str]) -> None:
config.logging.no_redirect_stdio = True

# Explicitly disable background processes
config.worker.should_update_user_directory = False
config.server.update_user_directory = False
config.worker.run_background_tasks = False
config.worker.start_pushers = False
config.worker.pusher_shard_config.instances = []

@@ -441,6 +441,38 @@ def start(config_options: List[str]) -> None:
"synapse.app.user_dir",
)

if config.worker.worker_app == "synapse.app.appservice":
if config.appservice.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``notify_appservices: false`` to the main config"
"\n"
)
sys.exit(1)

# Force the appservice to start since they will be disabled in the main config
config.appservice.notify_appservices = True
else:
# For other worker types we force this to off.
config.appservice.notify_appservices = False

if config.worker.worker_app == "synapse.app.user_dir":
if config.server.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``update_user_directory: false`` to the main config"
"\n"
)
sys.exit(1)

# Force the user directory to be updated since it will be disabled in the main config
config.server.update_user_directory = True
else:
# For other worker types we force this to off.
config.server.update_user_directory = False

synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage


@@ -17,7 +17,6 @@ import urllib.parse
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple

from prometheus_client import Counter
from typing_extensions import TypeGuard

from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
@@ -67,7 +66,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool:
return True


def _is_valid_3pe_result(r: object, field: str) -> TypeGuard[JsonDict]:
def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
if not isinstance(r, dict):
return False


@@ -33,6 +33,7 @@ class AppServiceConfig(Config):

def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.app_service_config_files = config.get("app_service_config_files", [])
self.notify_appservices = config.get("notify_appservices", True)
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)

def generate_config_section(cls, **kwargs: Any) -> str:
@@ -55,8 +56,7 @@ def load_appservices(
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
# type-ignore: this function gets arbitrary json value; we do use this path.
logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable]
logger.warning("Expected %s to be a list of AS config files.", config_files)
return []

# Dicts of value -> filename

@@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)

# MSC2285 (private read receipts)
# MSC2285 (hidden read receipts)
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)

# MSC3244 (room version capabilities)
@@ -81,6 +81,3 @@ class ExperimentalConfig(Config):

# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)

# MSC3786 (Add a default push rule to ignore m.room.server_acl events)
self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)

@@ -46,7 +46,7 @@ class FederationConfig(Config):
)

self.allow_device_name_lookup_over_federation = config.get(
"allow_device_name_lookup_over_federation", False
"allow_device_name_lookup_over_federation", True
)

def generate_config_section(self, **kwargs: Any) -> str:
@@ -81,11 +81,11 @@ class FederationConfig(Config):
#
#allow_profile_lookup_over_federation: false

# Uncomment to allow device display name lookup over federation. By default, the
# Federation API prevents other homeservers from obtaining the display names of
# user devices on this homeserver. Defaults to 'false'.
# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: true
#allow_device_name_lookup_over_federation: false
"""



@@ -43,8 +43,8 @@ class RegistrationConfig(Config):
self.registration_requires_token = config.get(
"registration_requires_token", False
)
self.enable_registration_token_3pid_bypass = config.get(
"enable_registration_token_3pid_bypass", False
self.enable_registration_token_3pid_bypasss = config.get(
"enable_registration_token_3pid_bypasss", False
)
self.registration_shared_secret = config.get("registration_shared_secret")


@@ -319,6 +319,10 @@ class ServerConfig(Config):
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))

# Whether to update the user directory or not. This should be set to
# false only if we are updating the user directory in a worker
self.update_user_directory = config.get("update_user_directory", True)

# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
@@ -409,7 +413,6 @@ class ServerConfig(Config):
)

self.mau_trial_days = config.get("mau_trial_days", 0)
self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
self.mau_limit_alerting = config.get("mau_limit_alerting", True)

# How long to keep redacted events in the database in unredacted form
@@ -1102,11 +1105,6 @@ class ServerConfig(Config):
# sign up in a short space of time never to return after their initial
# session.
#
# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
# applies a different trial number if the user was registered by an appservice.
# A value of 0 means no trial days are applied. Appservices not listed in this
# dictionary use the value of `mau_trial_days` instead.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
@@ -1117,8 +1115,6 @@ class ServerConfig(Config):
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
#mau_appservice_trial_days:
#   "appservice-id": 1

# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau

@@ -14,8 +14,7 @@
# limitations under the License.

import argparse
import logging
from typing import Any, Dict, List, Union
from typing import Any, List, Union

import attr

@@ -43,13 +42,6 @@ synapse process before they can be run in a separate worker.
Please add ``start_pushers: false`` to the main config
"""

_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""

logger = logging.getLogger(__name__)


def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
"""Helper for allowing parsing a string or list of strings to a config
@@ -304,112 +296,6 @@ class WorkerConfig(Config):
self.worker_name is None and background_tasks_instance == "master"
) or self.worker_name == background_tasks_instance

self.should_notify_appservices = self._should_this_worker_perform_duty(
config,
legacy_master_option_name="notify_appservices",
legacy_worker_app_name="synapse.app.appservice",
new_option_name="notify_appservices_from_worker",
)

self.should_update_user_directory = self._should_this_worker_perform_duty(
config,
legacy_master_option_name="update_user_directory",
legacy_worker_app_name="synapse.app.user_dir",
new_option_name="update_user_directory_from_worker",
)

def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],
legacy_master_option_name: str,
legacy_worker_app_name: str,
new_option_name: str,
) -> bool:
"""
Figures out whether this worker should perform a certain duty.

This function is temporary and is only to deal with the complexity
of allowing old, transitional and new configurations all at once.

Contradictions between the legacy and new part of a transitional configuration
will lead to a ConfigError.

Parameters:
config: The config dictionary
legacy_master_option_name: The name of a legacy option, whose value is boolean,
specifying whether it's the master that should handle a certain duty.
e.g. "notify_appservices"
legacy_worker_app_name: The name of a legacy Synapse worker application
that would traditionally perform this duty.
e.g. "synapse.app.appservice"
new_option_name: The name of the new option, whose value is the name of a
designated worker to perform the duty.
e.g. "notify_appservices_from_worker"
"""

# None means 'unspecified'; True means 'run here' and False means
# 'don't run here'.
new_option_should_run_here = None
if new_option_name in config:
designated_worker = config[new_option_name] or "master"
new_option_should_run_here = (
designated_worker == "master" and self.worker_name is None
) or designated_worker == self.worker_name

legacy_option_should_run_here = None
if legacy_master_option_name in config:
run_on_master = bool(config[legacy_master_option_name])

legacy_option_should_run_here = (
self.worker_name is None and run_on_master
) or (self.worker_app == legacy_worker_app_name and not run_on_master)

# Suggest using the new option instead.
logger.warning(
_DEPRECATED_WORKER_DUTY_OPTION_USED,
legacy_master_option_name,
new_option_name,
)

if self.worker_app == legacy_worker_app_name and config.get(
legacy_master_option_name, True
):
# As an extra bit of complication, we need to check that the
# specialised worker is only used if the legacy config says the
# master isn't performing the duties.
raise ConfigError(
f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n"
f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n"
f"The '{new_option_name}' option replaces '{legacy_master_option_name}'."
)

if new_option_should_run_here is None and legacy_option_should_run_here is None:
# Neither option specified; the fallback behaviour is to run on the main process
return self.worker_name is None

if (
new_option_should_run_here is not None
and legacy_option_should_run_here is not None
):
# Both options specified; ensure they match!
if new_option_should_run_here != legacy_option_should_run_here:
update_worker_type = (
" and set worker_app: synapse.app.generic_worker"
if self.worker_app == legacy_worker_app_name
else ""
)
# If the values conflict, we suggest the admin removes the legacy option
# for simplicity.
raise ConfigError(
f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n"
f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n"
)

# We've already validated that these aren't conflicting; now just see if
# either is True.
# (By this point, these are either the same value or only one is not None.)
return bool(new_option_should_run_here or legacy_option_should_run_here)
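
A stripped-down, standalone restatement (not the real class) of the resolution order the docstring above describes, covering just the new-style option for a single duty:

# Self-contained sketch of the new-option branch of the logic above.
from typing import Any, Dict, Optional


def should_run_duty_here(
    config: Dict[str, Any],
    worker_name: Optional[str],  # None on the main process
    new_option_name: str,
) -> bool:
    if new_option_name not in config:
        # Option unspecified: fall back to running on the main process.
        return worker_name is None
    designated_worker = config[new_option_name] or "master"
    return (
        designated_worker == "master" and worker_name is None
    ) or designated_worker == worker_name


# The duty runs on the named worker, and nowhere else:
assert should_run_duty_here(
    {"notify_appservices_from_worker": "w1"}, "w1", "notify_appservices_from_worker"
)
assert not should_run_duty_here(
    {"notify_appservices_from_worker": "w1"}, None, "notify_appservices_from_worker"
)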

def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Workers ##

@@ -22,16 +22,11 @@ from typing import (
List,
Optional,
Set,
TypeVar,
Union,
)

from typing_extensions import ParamSpec

from twisted.internet.defer import CancelledError

from synapse.api.presence import UserPresenceState
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -45,10 +40,6 @@ GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
logger = logging.getLogger(__name__)


P = ParamSpec("P")
R = TypeVar("R")


def load_legacy_presence_router(hs: "HomeServer") -> None:
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks they implement.
@@ -72,15 +63,13 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:

# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(
f: Optional[Callable[P, R]]
) -> Optional[Callable[P, Awaitable[R]]]:
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None

def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
@@ -91,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
return run

# Register the hooks through the module API.
hooks: Dict[str, Optional[Callable[..., Any]]] = {
hooks = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
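
A self-contained illustration of the wrapping above: legacy module hooks may be sync or async, so they are normalised into awaitables before registration (cf. `maybe_awaitable` in the imports; names below are illustrative).

import asyncio
import inspect
from typing import Any, Awaitable, Callable, Optional


def wrap_as_async(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
    if f is None:
        # Hook not implemented by the module: register nothing.
        return None

    async def run(*args: Any, **kwargs: Any) -> Any:
        result = f(*args, **kwargs)
        # Await coroutines; pass plain return values straight through.
        return await result if inspect.isawaitable(result) else result

    return run


def legacy_get_interested_users(user_id: str) -> str:  # a sync legacy hook
    return user_id.upper()


wrapped = wrap_as_async(legacy_get_interested_users)
assert wrapped is not None
print(asyncio.run(wrapped("@alice:example.org")))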
@@ -158,11 +147,7 @@ class PresenceRouter:
# run all the callbacks for get_users_for_states and combine the results
for callback in self._get_users_for_states_callbacks:
try:
# Note: result is an object here, because we don't trust modules to
# return the types they're supposed to.
result: object = await delay_cancellation(callback(state_updates))
except CancelledError:
raise
result = await callback(state_updates)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
@@ -214,9 +199,7 @@ class PresenceRouter:
# run all the callbacks for get_interested_users and combine the results
for callback in self._get_interested_users_callbacks:
try:
result = await delay_cancellation(callback(user_id))
except CancelledError:
raise
result = await callback(user_id)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue

@@ -15,10 +15,12 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import attr
from frozendict import frozendict
from typing_extensions import Literal

from twisted.internet.defer import Deferred

from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import JsonDict, StateMap

if TYPE_CHECKING:
@@ -58,9 +60,6 @@ class EventContext:
If ``state_group`` is None (ie, the event is an outlier),
``state_group_before_event`` will always also be ``None``.

state_delta_due_to_event: If `state_group` and `state_group_before_event` are not None
then this is the delta of the state between the two groups.

prev_group: If it is known, ``state_group``'s prev_group. Note that this being
None does not necessarily mean that ``state_group`` does not have
a prev_group!
@@ -79,47 +78,73 @@ class EventContext:
app_service: If this event is being sent by a (local) application service, that
app service.

_current_state_ids: The room state map, including this event - ie, the state
in ``state_group``.

(type, state_key) -> event_id

For an outlier, this is {}

Note that this is a private attribute: it should be accessed via
``get_current_state_ids``. _AsyncEventContext impl calculates this
on-demand: it will be None until that happens.

_prev_state_ids: The room state map, excluding this event - ie, the state
in ``state_group_before_event``. For a non-state
event, this will be the same as _current_state_events.

Note that it is a completely different thing to prev_group!

(type, state_key) -> event_id

For an outlier, this is {}

As with _current_state_ids, this is a private attribute. It should be
accessed via get_prev_state_ids.

partial_state: if True, we may be storing this event with a temporary,
incomplete state.
"""

_storage: "Storage"
rejected: Union[Literal[False], str] = False
rejected: Union[bool, str] = False
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None
prev_group: Optional[int] = None
delta_ids: Optional[StateMap[str]] = None
app_service: Optional[ApplicationService] = None

_current_state_ids: Optional[StateMap[str]] = None
_prev_state_ids: Optional[StateMap[str]] = None

partial_state: bool = False

@staticmethod
def with_state(
storage: "Storage",
state_group: Optional[int],
state_group_before_event: Optional[int],
state_delta_due_to_event: Optional[StateMap[str]],
current_state_ids: Optional[StateMap[str]],
prev_state_ids: Optional[StateMap[str]],
partial_state: bool,
prev_group: Optional[int] = None,
delta_ids: Optional[StateMap[str]] = None,
) -> "EventContext":
return EventContext(
storage=storage,
current_state_ids=current_state_ids,
prev_state_ids=prev_state_ids,
state_group=state_group,
state_group_before_event=state_group_before_event,
state_delta_due_to_event=state_delta_due_to_event,
prev_group=prev_group,
delta_ids=delta_ids,
partial_state=partial_state,
)

@staticmethod
def for_outlier(
storage: "Storage",
) -> "EventContext":
def for_outlier() -> "EventContext":
"""Return an EventContext instance suitable for persisting an outlier event"""
return EventContext(storage=storage)
return EventContext(
current_state_ids={},
prev_state_ids={},
)

async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
"""Converts self to a type that can be serialized as JSON, and then
@@ -132,14 +157,24 @@ class EventContext:
The serialized event.
"""

# We don't serialize the full state dicts, instead they get pulled out
# of the DB on the other side. However, the other side can't figure out
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
prev_state_ids = await self.get_prev_state_ids()
prev_state_id = prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None

return {
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.get_state_key(),
"state_group": self._state_group,
"state_group_before_event": self.state_group_before_event,
"rejected": self.rejected,
"prev_group": self.prev_group,
"state_delta_due_to_event": _encode_state_dict(
self._state_delta_due_to_event
),
"delta_ids": _encode_state_dict(self.delta_ids),
"app_service_id": self.app_service.id if self.app_service else None,
"partial_state": self.partial_state,
@@ -157,16 +192,16 @@ class EventContext:
Returns:
The event context.
"""
context = EventContext(
context = _AsyncEventContextImpl(
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
storage=storage,
prev_state_id=input["prev_state_id"],
event_type=input["event_type"],
event_state_key=input["event_state_key"],
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
prev_group=input["prev_group"],
state_delta_due_to_event=_decode_state_dict(
input["state_delta_due_to_event"]
),
delta_ids=_decode_state_dict(input["delta_ids"]),
rejected=input["rejected"],
partial_state=input.get("partial_state", False),
@@ -214,15 +249,8 @@ class EventContext:
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")

assert self._state_delta_due_to_event is not None

prev_state_ids = await self.get_prev_state_ids()

if self._state_delta_due_to_event:
prev_state_ids = dict(prev_state_ids)
prev_state_ids.update(self._state_delta_due_to_event)

return prev_state_ids
await self._ensure_fetched()
return self._current_state_ids

async def get_prev_state_ids(self) -> StateMap[str]:
"""
@@ -237,10 +265,94 @@ class EventContext:
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
assert self.state_group_before_event is not None
return await self._storage.state.get_state_ids_for_group(
self.state_group_before_event
await self._ensure_fetched()
# There *should* be previous state IDs now.
assert self._prev_state_ids is not None
return self._prev_state_ids

def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
"""Gets the current state IDs if we have them already cached.

It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.

Returns:
Returns None if we haven't cached the state or if state_group is None
(which happens when the associated event is an outlier).

Otherwise, returns the current state IDs.
"""
|
||||
if self.rejected:
|
||||
raise RuntimeError("Attempt to access state_ids of rejected event")
|
||||
|
||||
return self._current_state_ids
|
||||
|
||||
async def _ensure_fetched(self) -> None:
|
||||
return None
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
class _AsyncEventContextImpl(EventContext):
|
||||
"""
|
||||
An implementation of EventContext which fetches _current_state_ids and
|
||||
_prev_state_ids from the database on demand.
|
||||
|
||||
Attributes:
|
||||
|
||||
_storage
|
||||
|
||||
_fetching_state_deferred: Resolves when *_state_ids have been calculated.
|
||||
None if we haven't started calculating yet
|
||||
|
||||
_event_type: The type of the event the context is associated with.
|
||||
|
||||
_event_state_key: The state_key of the event the context is associated with.
|
||||
|
||||
_prev_state_id: If the event associated with the context is a state event,
|
||||
then `_prev_state_id` is the event_id of the state that was replaced.
|
||||
"""
|
||||
|
||||
# This needs to have a default as we're inheriting
|
||||
_storage: "Storage" = attr.ib(default=None)
|
||||
_prev_state_id: Optional[str] = attr.ib(default=None)
|
||||
_event_type: str = attr.ib(default=None)
|
||||
_event_state_key: Optional[str] = attr.ib(default=None)
|
||||
_fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)
|
||||
|
||||
async def _ensure_fetched(self) -> None:
|
||||
if not self._fetching_state_deferred:
|
||||
self._fetching_state_deferred = run_in_background(self._fill_out_state)
|
||||
|
||||
await make_deferred_yieldable(self._fetching_state_deferred)
|
||||
|
||||
async def _fill_out_state(self) -> None:
|
||||
"""Called to populate the _current_state_ids and _prev_state_ids
|
||||
attributes by loading from the database.
|
||||
"""
|
||||
if self.state_group is None:
|
||||
# No state group means the event is an outlier. Usually the state_ids dicts are also
|
||||
# pre-set to empty dicts, but they get reset when the context is serialized, so set
|
||||
# them to empty dicts again here.
|
||||
self._current_state_ids = {}
|
||||
self._prev_state_ids = {}
|
||||
return
|
||||
|
||||
current_state_ids = await self._storage.state.get_state_ids_for_group(
|
||||
self.state_group
|
||||
)
|
||||
# Set this separately so mypy knows current_state_ids is not None.
|
||||
self._current_state_ids = current_state_ids
|
||||
if self._event_state_key is not None:
|
||||
self._prev_state_ids = dict(current_state_ids)
|
||||
|
||||
key = (self._event_type, self._event_state_key)
|
||||
if self._prev_state_id:
|
||||
self._prev_state_ids[key] = self._prev_state_id
|
||||
else:
|
||||
self._prev_state_ids.pop(key, None)
|
||||
else:
|
||||
self._prev_state_ids = current_state_ids
|
||||
|
||||
|
||||
def _encode_state_dict(
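
A toy illustration (made-up event IDs) of the state maps documented above: both are mappings of `(event type, state key) -> event ID`, and the "prev" map differs from the "current" map only at the key of the event being applied.

prev_state_ids = {
    ("m.room.member", "@alice:example.org"): "$alice_join",
    ("m.room.name", ""): "$old_name",
}
# Applying a new m.room.name state event with ID "$new_name":
current_state_ids = dict(prev_state_ids)
current_state_ids[("m.room.name", "")] = "$new_name"
assert prev_state_ids[("m.room.name", "")] == "$old_name"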

@@ -27,12 +27,11 @@ from typing import (
Union,
)

from synapse.api.errors import Codes
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import ALLOW, Decision, RegistrationBehaviour
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
import synapse.events
@@ -40,34 +39,17 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)


DEPRECATED_BOOL = bool

CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[Union[ALLOW, Codes, str, DEPRECATED_BOOL]],
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[
[str, str, bool], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
USER_MAY_INVITE_CALLBACK = Callable[
[str, str, str], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
[str, str, str, str], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[
[str], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
[str, RoomAlias], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
[str, str], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[
[UserProfile], Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]]
Awaitable[Union[bool, str]],
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]]
LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
[
Optional[dict],
@@ -83,11 +65,11 @@ CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
Collection[Tuple[str, str]],
Optional[str],
],
Awaitable[Union[RegistrationBehaviour, Codes]],
Awaitable[RegistrationBehaviour],
]
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
[ReadableFileWrapper, FileInfo],
Awaitable[Union[ALLOW, Codes, DEPRECATED_BOOL]],
Awaitable[bool],
]


@@ -258,7 +240,7 @@ class SpamChecker:

async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[ALLOW, Codes, str]:
) -> Union[bool, str]:
"""Checks if a given event is considered "spammy" by this server.

If the server considers an event spammy, then it will be rejected if
@@ -269,29 +251,19 @@ class SpamChecker:
event: the event to be checked

Returns:
- on `ALLOW`, the event is considered good (non-spammy) and should
be let through. Other spamcheck filters may still reject it.
- on `Codes`, the event is considered spammy and is rejected with a specific
error message/code.
- on `str`, the event is considered spammy and the string is used as error
message.
True or a string if the event is spammy. If a string is returned it
will be used as the error message returned to the user.
"""
for callback in self._check_event_for_spam_callbacks:
res: Union[ALLOW, Codes, str, DEPRECATED_BOOL] = await delay_cancellation(
callback(event)
)
if res is False or res is ALLOW:
continue
elif res is True:
return Codes.FORBIDDEN
else:
res: Union[bool, str] = await callback(event)
if res:
return res

return ALLOW
return False
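
A minimal spam-checker callback sketch (hypothetical module code, assuming the side of this diff that defines `ALLOW` and `Codes`, both imported in the hunk above): return `ALLOW` to let the event through, or a `Codes` value to reject it with a specific error.

from synapse.api.errors import Codes
from synapse.spam_checker_api import ALLOW


async def check_event_for_spam(event):  # registered through the module API
    # `content` is a dict on Synapse events; the keyword test is illustrative.
    body = event.content.get("body", "")
    if "buy cheap" in body:
        # Reject with a specific error code (a custom string also works).
        return Codes.FORBIDDEN
    # Let the event through; later callbacks may still reject it.
    return ALLOW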

async def user_may_join_room(
self, user_id: str, room_id: str, is_invited: bool
) -> Decision:
) -> bool:
"""Checks if a given users is allowed to join a room.
|
||||
Not called when a user creates a room.

@@ -301,54 +273,42 @@ class SpamChecker:
is_invited: Whether the user is invited into the room

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
Whether the user may join the room
"""
for callback in self._user_may_join_room_callbacks:
may_join_room = await delay_cancellation(
callback(user_id, room_id, is_invited)
)
if may_join_room is True or may_join_room is ALLOW:
continue
elif may_join_room is False:
return Codes.FORBIDDEN
else:
return may_join_room
if await callback(user_id, room_id, is_invited) is False:
return False

return ALLOW
return True

async def user_may_invite(
self, inviter_userid: str, invitee_userid: str, room_id: str
) -> Decision:
) -> bool:
"""Checks if a given user may send an invite

If this method returns false, the invite will be rejected.

Args:
inviter_userid: The user ID of the sender of the invitation
invitee_userid: The user ID targeted in the invitation
room_id: The room ID

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user may send an invite, otherwise False
"""
for callback in self._user_may_invite_callbacks:
may_invite = await delay_cancellation(
callback(inviter_userid, invitee_userid, room_id)
)
if may_invite is True or may_invite is ALLOW:
continue
elif may_invite is False:
return Codes.FORBIDDEN
else:
return may_invite
if await callback(inviter_userid, invitee_userid, room_id) is False:
return False

return ALLOW
return True

async def user_may_send_3pid_invite(
self, inviter_userid: str, medium: str, address: str, room_id: str
) -> Decision:
) -> bool:
"""Checks if a given user may invite a given threepid into the room

If this method returns false, the threepid invite will be rejected.

Note that if the threepid is already associated with a Matrix user ID, Synapse
will call user_may_invite with said user ID instead.

@@ -359,94 +319,70 @@ class SpamChecker:
room_id: The room ID

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user may send the invite, otherwise False
"""
for callback in self._user_may_send_3pid_invite_callbacks:
may_send_3pid_invite = await delay_cancellation(
callback(inviter_userid, medium, address, room_id)
)
if may_send_3pid_invite is True or may_send_3pid_invite is ALLOW:
continue
elif may_send_3pid_invite is False:
return Codes.FORBIDDEN
else:
return may_send_3pid_invite
if await callback(inviter_userid, medium, address, room_id) is False:
return False

return ALLOW
return True

async def user_may_create_room(self, userid: str) -> Decision:
async def user_may_create_room(self, userid: str) -> bool:
"""Checks if a given user may create a room

If this method returns false, the creation request will be rejected.

Args:
userid: The ID of the user attempting to create a room

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user may create a room, otherwise False
"""
for callback in self._user_may_create_room_callbacks:
may_create_room = await delay_cancellation(callback(userid))
if may_create_room is True or may_create_room is ALLOW:
continue
elif may_create_room is False:
return Codes.FORBIDDEN
else:
return may_create_room
if await callback(userid) is False:
return False

return ALLOW
return True

async def user_may_create_room_alias(
self, userid: str, room_alias: RoomAlias
) -> Decision:
) -> bool:
"""Checks if a given user may create a room alias

If this method returns false, the association request will be rejected.

Args:
userid: The ID of the user attempting to create a room alias
room_alias: The alias to be created

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user may create a room alias, otherwise False
"""
for callback in self._user_may_create_room_alias_callbacks:
may_create_room_alias = await delay_cancellation(
callback(userid, room_alias)
)
if may_create_room_alias is True or may_create_room_alias is ALLOW:
continue
elif may_create_room_alias is False:
return Codes.FORBIDDEN
else:
return may_create_room_alias
if await callback(userid, room_alias) is False:
return False

return ALLOW
return True

async def user_may_publish_room(
self, userid: str, room_id: str
) -> Union[ALLOW, Codes, DEPRECATED_BOOL]:
async def user_may_publish_room(self, userid: str, room_id: str) -> bool:
"""Checks if a given user may publish a room to the directory

If this method returns false, the publish request will be rejected.

Args:
userid: The user ID attempting to publish the room
room_id: The ID of the room that would be published

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user may publish the room, otherwise False
"""
for callback in self._user_may_publish_room_callbacks:
may_publish_room = await delay_cancellation(callback(userid, room_id))
if may_publish_room is True or may_publish_room is ALLOW:
continue
elif may_publish_room is False:
return Codes.FORBIDDEN
else:
return may_publish_room
if await callback(userid, room_id) is False:
return False

return ALLOW
return True

async def check_username_for_spam(self, user_profile: UserProfile) -> Decision:
async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
"""Checks if a user ID or display name are considered "spammy" by this server.

If the server considers a username spammy, then it will not be included in
@@ -459,21 +395,15 @@ class SpamChecker:
* avatar_url

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the user is spammy.
"""
for callback in self._check_username_for_spam_callbacks:
# Make a copy of the user profile object to ensure the spam checker cannot
# modify it.
is_spam = await delay_cancellation(callback(user_profile.copy()))
if is_spam is False or is_spam is ALLOW:
continue
elif is_spam is True:
return Codes.FORBIDDEN
else:
return is_spam
if await callback(user_profile.copy()):
return True

return ALLOW
return False

async def check_registration_for_spam(
self,
@@ -498,11 +428,9 @@ class SpamChecker:
"""

for callback in self._check_registration_for_spam_callbacks:
behaviour = await delay_cancellation(
behaviour = await (
callback(email_threepid, username, request_info, auth_provider_id)
)
if isinstance(behaviour, Codes):
return behaviour
assert isinstance(behaviour, RegistrationBehaviour)
if behaviour != RegistrationBehaviour.ALLOW:
return behaviour
@@ -511,7 +439,7 @@ class SpamChecker:

async def check_media_file_for_spam(
self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
) -> Decision:
) -> bool:
"""Checks if a piece of newly uploaded media should be blocked.

This will be called for local uploads, downloads of remote media, each
@@ -533,22 +461,19 @@ class SpamChecker:

return False


Args:
file: An object that allows reading the contents of the media.
file_info: Metadata about the file.

Returns:
- on `ALLOW`, the action is permitted.
- on `Codes`, the action is rejected with a specific error message/code.
True if the media should be blocked or False if it should be
allowed.
"""

for callback in self._check_media_file_for_spam_callbacks:
is_spam = await delay_cancellation(callback(file_wrapper, file_info))
if is_spam is False or is_spam is ALLOW:
continue
elif is_spam is True:
return Codes.FORBIDDEN
else:
return is_spam
spam = await callback(file_wrapper, file_info)
if spam:
return True

return ALLOW
return False

@@ -14,14 +14,12 @@
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

from twisted.internet.defer import CancelledError

from synapse.api.errors import ModuleFailedException, SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -265,11 +263,7 @@ class ThirdPartyEventRules:

        for callback in self._check_event_allowed_callbacks:
            try:
                res, replacement_data = await delay_cancellation(
                    callback(event, state_events)
                )
            except CancelledError:
                raise
                res, replacement_data = await callback(event, state_events)
            except SynapseError as e:
                # FIXME: Being able to throw SynapseErrors is relied upon by
                # some modules. PR #10386 accidentally broke this ability.
@@ -339,13 +333,8 @@ class ThirdPartyEventRules:

        for callback in self._check_threepid_can_be_invited_callbacks:
            try:
                threepid_can_be_invited = await delay_cancellation(
                    callback(medium, address, state_events)
                )
                if threepid_can_be_invited is False:
                if await callback(medium, address, state_events) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -372,13 +361,8 @@ class ThirdPartyEventRules:

        for callback in self._check_visibility_can_be_modified_callbacks:
            try:
                visibility_can_be_modified = await delay_cancellation(
                    callback(room_id, state_events, new_visibility)
                )
                if visibility_can_be_modified is False:
                if await callback(room_id, state_events, new_visibility) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -416,11 +400,8 @@ class ThirdPartyEventRules:
        """
        for callback in self._check_can_shutdown_room_callbacks:
            try:
                can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
                if can_shutdown_room is False:
                if await callback(user_id, room_id) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e
@@ -441,13 +422,8 @@ class ThirdPartyEventRules:
        """
        for callback in self._check_can_deactivate_user_callbacks:
            try:
                can_deactivate_user = await delay_cancellation(
                    callback(user_id, by_admin)
                )
                if can_deactivate_user is False:
                if await callback(user_id, by_admin) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e

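Note: one side of this diff wraps every module callback in `delay_cancellation(...)` and explicitly re-raises `CancelledError`. Synapse's implementation lives in `synapse.util.async_helpers` and is Twisted-based; the following is only a rough asyncio analogue, written to illustrate the intent: if the caller is cancelled mid-call, the module callback still runs to completion before the cancellation propagates, so callbacks never observe a half-finished await.

import asyncio
from typing import Awaitable, TypeVar

T = TypeVar("T")


async def delay_cancellation(awaitable: Awaitable[T]) -> T:
    """Await `awaitable`, deferring any cancellation until it resolves."""
    task = asyncio.ensure_future(awaitable)
    try:
        # shield() keeps an outer cancellation from propagating into `task`.
        return await asyncio.shield(task)
    except asyncio.CancelledError:
        # Let the callback finish its work, then re-raise the cancellation.
        await task
        raise
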
@@ -22,7 +22,6 @@ from typing import (
    Iterable,
    List,
    Mapping,
    MutableMapping,
    Optional,
    Union,
)
@@ -581,20 +580,10 @@ class EventClientSerializer:
    ]


_PowerLevel = Union[str, int]


def copy_and_fixup_power_levels_contents(
    old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_power_levels_contents(
    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
) -> Dict[str, Union[int, Dict[str, int]]]:
    """Copy the content of a power_levels event, unfreezing frozendicts along the way.

    We accept as input power level values which are strings, provided they represent an
    integer, e.g. `"100"` instead of 100. Such strings are converted to integers
    in the returned dictionary (hence "fixup" in the function name).

    Note that future room versions will outlaw such stringy power levels (see
    https://github.com/matrix-org/matrix-spec/issues/853).
    """Copy the content of a power_levels event, unfreezing frozendicts along the way

    Raises:
        TypeError if the input does not look like a valid power levels event content
@@ -603,47 +592,29 @@ def copy_and_fixup_power_levels_contents(
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}

    for k, v in old_power_levels.items():

        if isinstance(v, int):
            power_levels[k] = v
            continue

        if isinstance(v, collections.abc.Mapping):
            h: Dict[str, int] = {}
            power_levels[k] = h
            for k1, v1 in v.items():
                _copy_power_level_value_as_integer(v1, h, k1)
                # we should only have one level of nesting
                if not isinstance(v1, int):
                    raise TypeError(
                        "Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
                    )
                h[k1] = v1
            continue

        else:
            _copy_power_level_value_as_integer(v, power_levels, k)
            raise TypeError("Invalid power_levels value for %s: %r" % (k, v))

    return power_levels


def _copy_power_level_value_as_integer(
    old_value: object,
    power_levels: MutableMapping[str, Any],
    key: str,
) -> None:
    """Set `power_levels[key]` to the integer represented by `old_value`.

    :raises TypeError: if `old_value` is not an integer, nor a base-10 string
        representation of an integer.
    """
    if isinstance(old_value, int):
        power_levels[key] = old_value
        return

    if isinstance(old_value, str):
        try:
            parsed_value = int(old_value, base=10)
        except ValueError:
            # Fall through to the final TypeError.
            pass
        else:
            power_levels[key] = parsed_value
            return

    raise TypeError(f"Invalid power_levels value for {key}: {old_value}")


def validate_canonicaljson(value: Any) -> None:
    """
    Ensure that the JSON object is valid according to the rules of canonical JSON.

|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import synapse
|
||||
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.api.room_versions import EventFormatVersions, RoomVersion
|
||||
@@ -99,9 +98,9 @@ class FederationBase:
|
||||
)
|
||||
return redacted_event
|
||||
|
||||
spam_check = await self.spam_checker.check_event_for_spam(pdu)
|
||||
result = await self.spam_checker.check_event_for_spam(pdu)
|
||||
|
||||
if spam_check is not synapse.spam_checker_api.ALLOW:
|
||||
if result:
|
||||
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
|
||||
# we redact (to save disk space) as well as soft-failing (to stop
|
||||
# using the event in prev_events).
|
||||
|
||||
@@ -618,7 +618,7 @@ class FederationClient(FederationBase):
|
||||
#
|
||||
# Dendrite returns a 404 (with a body of "404 page not found");
|
||||
# Conduit returns a 404 (with no body); and Synapse returns a 400
|
||||
# with M_UNRECOGNIZED.
|
||||
# with M_UNRECOGNISED.
|
||||
#
|
||||
# This needs to be rather specific as some endpoints truly do return 404
|
||||
# errors.
|
||||
@@ -1426,8 +1426,6 @@ class FederationClient(FederationBase):
|
||||
room = res.get("room")
|
||||
if not isinstance(room, dict):
|
||||
raise InvalidResponseError("'room' must be a dict")
|
||||
if room.get("room_id") != room_id:
|
||||
raise InvalidResponseError("wrong room returned in hierarchy response")
|
||||
|
||||
# Validate children_state of the room.
|
||||
children_state = room.pop("children_state", [])
|
||||
|
||||
@@ -23,7 +23,6 @@ from synapse.api.errors import AuthError, StoreError, SynapseError
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.types import UserID
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.async_helpers import delay_cancellation
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -151,7 +150,7 @@ class AccountValidityHandler:
|
||||
Whether the user has expired.
|
||||
"""
|
||||
for callback in self._is_user_expired_callbacks:
|
||||
expired = await delay_cancellation(callback(user_id))
|
||||
expired = await callback(user_id)
|
||||
if expired is not None:
|
||||
return expired
|
||||
|
||||
|
||||
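Note: the loop above is a "first non-None answer wins" chain over module callbacks. Extracted on its own (the `False` default when no module answers is an assumption based on the surrounding docstring, not shown in the hunk):

from typing import Awaitable, Callable, Iterable, Optional


async def first_module_answer(
    callbacks: Iterable[Callable[[str], Awaitable[Optional[bool]]]],
    user_id: str,
) -> bool:
    for callback in callbacks:
        expired = await callback(user_id)
        if expired is not None:
            # The first module with an opinion decides.
            return expired
    return False  # assumed default: no module considered the user expired
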
@@ -59,7 +59,7 @@ class ApplicationServicesHandler:
        self.scheduler = hs.get_application_service_scheduler()
        self.started_scheduler = False
        self.clock = hs.get_clock()
        self.notify_appservices = hs.config.worker.should_notify_appservices
        self.notify_appservices = hs.config.appservice.notify_appservices
        self.event_sources = hs.get_event_sources()
        self._msc2409_to_device_messages_enabled = (
            hs.config.experimental.msc2409_to_device_messages_enabled

|
||||
import unpaddedbase64
|
||||
from pymacaroons.exceptions import MacaroonVerificationFailedException
|
||||
|
||||
from twisted.internet.defer import CancelledError
|
||||
from twisted.web.server import Request
|
||||
|
||||
from synapse.api.constants import LoginType
|
||||
@@ -68,7 +67,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.storage.roommember import ProfileInfo
|
||||
from synapse.types import JsonDict, Requester, UserID
|
||||
from synapse.util import stringutils as stringutils
|
||||
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
|
||||
from synapse.util.msisdn import phone_number_to_msisdn
|
||||
from synapse.util.stringutils import base62_encode
|
||||
@@ -552,7 +551,7 @@ class AuthHandler:
|
||||
await self.store.set_ui_auth_clientdict(sid, clientdict)
|
||||
|
||||
user_agent = get_request_user_agent(request)
|
||||
clientip = request.getClientAddress().host
|
||||
clientip = request.getClientIP()
|
||||
|
||||
await self.store.add_user_agent_ip_to_ui_auth_session(
|
||||
session.session_id, user_agent, clientip
|
||||
@@ -2203,11 +2202,7 @@ class PasswordAuthProvider:
|
||||
# other than None (i.e. until a callback returns a success)
|
||||
for callback in self.auth_checker_callbacks[login_type]:
|
||||
try:
|
||||
result = await delay_cancellation(
|
||||
callback(username, login_type, login_dict)
|
||||
)
|
||||
except CancelledError:
|
||||
raise
|
||||
result = await callback(username, login_type, login_dict)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to run module API callback %s: %s", callback, e)
|
||||
continue
|
||||
@@ -2268,9 +2263,7 @@ class PasswordAuthProvider:
|
||||
|
||||
for callback in self.check_3pid_auth_callbacks:
|
||||
try:
|
||||
result = await delay_cancellation(callback(medium, address, password))
|
||||
except CancelledError:
|
||||
raise
|
||||
result = await callback(medium, address, password)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to run module API callback %s: %s", callback, e)
|
||||
continue
|
||||
@@ -2352,7 +2345,7 @@ class PasswordAuthProvider:
|
||||
"""
|
||||
for callback in self.get_username_for_registration_callbacks:
|
||||
try:
|
||||
res = await delay_cancellation(callback(uia_results, params))
|
||||
res = await callback(uia_results, params)
|
||||
|
||||
if isinstance(res, str):
|
||||
return res
|
||||
@@ -2366,8 +2359,6 @@ class PasswordAuthProvider:
|
||||
callback,
|
||||
res,
|
||||
)
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Module raised an exception in get_username_for_registration: %s",
|
||||
@@ -2397,7 +2388,7 @@ class PasswordAuthProvider:
|
||||
"""
|
||||
for callback in self.get_displayname_for_registration_callbacks:
|
||||
try:
|
||||
res = await delay_cancellation(callback(uia_results, params))
|
||||
res = await callback(uia_results, params)
|
||||
|
||||
if isinstance(res, str):
|
||||
return res
|
||||
@@ -2411,8 +2402,6 @@ class PasswordAuthProvider:
|
||||
callback,
|
||||
res,
|
||||
)
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Module raised an exception in get_displayname_for_registration: %s",
|
||||
@@ -2440,7 +2429,7 @@ class PasswordAuthProvider:
|
||||
"""
|
||||
for callback in self.is_3pid_allowed_callbacks:
|
||||
try:
|
||||
res = await delay_cancellation(callback(medium, address, registration))
|
||||
res = await callback(medium, address, registration)
|
||||
|
||||
if res is False:
|
||||
return res
|
||||
@@ -2454,8 +2443,6 @@ class PasswordAuthProvider:
|
||||
callback,
|
||||
res,
|
||||
)
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Module raised an exception in is_3pid_allowed: %s", e)
|
||||
raise SynapseError(code=500, msg="Internal Server Error")
|
||||
|
||||
@@ -16,7 +16,6 @@ import logging
import string
from typing import TYPE_CHECKING, Iterable, List, Optional

import synapse
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (
    AuthError,
@@ -138,13 +137,10 @@ class DirectoryHandler:
                    403, "You must be in the room to create an alias for it"
                )

            spam_check = await self.spam_checker.user_may_create_room_alias(
            if not await self.spam_checker.user_may_create_room_alias(
                user_id, room_alias
            )
            if spam_check is not synapse.spam_checker_api.ALLOW:
                raise AuthError(
                    403, "This alias creation request has been rejected", spam_check
                )
            ):
                raise AuthError(403, "This user is not permitted to create this alias")

        if not self.config.roomdirectory.is_alias_creation_allowed(
            user_id, room_id, room_alias_str
@@ -430,12 +426,9 @@ class DirectoryHandler:
        """
        user_id = requester.user.to_string()

        spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id)
        if spam_check is not synapse.spam_checker_api.ALLOW:
        if not await self.spam_checker.user_may_publish_room(user_id, room_id):
            raise AuthError(
                403,
                "This request to publish a room to the room list has been rejected",
                spam_check,
                403, "This user is not permitted to publish rooms to the room list"
            )

        if requester.is_guest:

@@ -27,7 +27,6 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

import synapse
from synapse import event_auth
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
@@ -660,7 +659,7 @@ class FederationHandler:
        # in the invitee's sync stream. It is stripped out for all other local users.
        event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]

        context = EventContext.for_outlier(self.storage)
        context = EventContext.for_outlier()
        stream_id = await self._federation_event_handler.persist_events_and_notify(
            event.room_id, [(event, context)]
        )
@@ -800,14 +799,11 @@ class FederationHandler:
        if self.hs.config.server.block_non_admin_invites:
            raise SynapseError(403, "This server does not accept room invites")

        spam_check = await self.spam_checker.user_may_invite(
        if not await self.spam_checker.user_may_invite(
            event.sender, event.state_key, event.room_id
        )
        if spam_check is not synapse.spam_checker_api.ALLOW:
        ):
            raise SynapseError(
                403,
                "This user is not permitted to send invites to this server/user",
                spam_check,
                403, "This user is not permitted to send invites to this server/user"
            )

        membership = event.content.get("membership")
@@ -852,7 +848,7 @@ class FederationHandler:
                )
            )

        context = EventContext.for_outlier(self.storage)
        context = EventContext.for_outlier()
        await self._federation_event_handler.persist_events_and_notify(
            event.room_id, [(event, context)]
        )
@@ -881,7 +877,7 @@ class FederationHandler:

        await self.federation_client.send_leave(host_list, event)

        context = EventContext.for_outlier(self.storage)
        context = EventContext.for_outlier()
        stream_id = await self._federation_event_handler.persist_events_and_notify(
            event.room_id, [(event, context)]
        )

@@ -1423,7 +1423,7 @@ class FederationEventHandler:
        # we're not bothering about room state, so flag the event as an outlier.
        event.internal_metadata.outlier = True

        context = EventContext.for_outlier(self._storage)
        context = EventContext.for_outlier()
        try:
            validate_event_for_room_version(room_version_obj, event)
            check_auth_rules_for_event(room_version_obj, event, auth)
@@ -1874,10 +1874,10 @@ class FederationEventHandler:
        )

        return EventContext.with_state(
            storage=self._storage,
            state_group=state_group,
            state_group_before_event=context.state_group_before_event,
            state_delta_due_to_event=state_updates,
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            prev_group=prev_group,
            delta_ids=state_updates,
            partial_state=context.partial_state,

@@ -92,7 +92,7 @@ class IdentityHandler:
        """

        await self._3pid_validation_ratelimiter_ip.ratelimit(
            None, (medium, request.getClientAddress().host)
            None, (medium, request.getClientIP())
        )
        await self._3pid_validation_ratelimiter_address.ratelimit(
            None, (medium, address)

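Note: the `request.getClientIP()` to `request.getClientAddress().host` swap recurs throughout this diff. Twisted deprecated the former; the replacement returns an address object whose `host` attribute holds the IP. A defensive wrapper sketch — the fallback for host-less transports such as UNIX sockets is an assumption:

def client_ip(request) -> str:
    """Return the peer IP, tolerating transports without a host attribute."""
    return getattr(request.getClientAddress(), "host", "-")
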
@@ -143,7 +143,7 @@ class InitialSyncHandler:
                    to_key=int(now_token.receipt_key),
                )
                if self.hs.config.experimental.msc2285_enabled:
                    receipt = ReceiptEventSource.filter_out_private(receipt, user_id)
                    receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id)

                tags_by_room = await self.store.get_tags_for_user(user_id)

@@ -449,7 +449,7 @@ class InitialSyncHandler:
            if not receipts:
                return []
            if self.hs.config.experimental.msc2285_enabled:
                receipts = ReceiptEventSource.filter_out_private(receipts, user_id)
                receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id)
            return receipts

        presence, receipts, (messages, token) = await make_deferred_yieldable(

@@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json

from twisted.internet.interfaces import IDelayedCall

import synapse
from synapse import event_auth
from synapse.api.constants import (
    EventContentFields,
@@ -758,10 +757,6 @@ class EventCreationHandler:
        The previous version of the event is returned, if it is found in the
        event context. Otherwise, None is returned.
        """
        if event.internal_metadata.is_outlier():
            # This can happen due to out of band memberships
            return None

        prev_state_ids = await context.get_prev_state_ids()
        prev_event_id = prev_state_ids.get((event.type, event.state_key))
        if not prev_event_id:
@@ -882,11 +877,11 @@ class EventCreationHandler:
                event.sender,
            )

        spam_check = await self.spam_checker.check_event_for_spam(event)
        if spam_check is not synapse.spam_checker_api.ALLOW:
            raise SynapseError(
                403, "This message had been rejected as probable spam", spam_check
            )
        spam_error = await self.spam_checker.check_event_for_spam(event)
        if spam_error:
            if not isinstance(spam_error, str):
                spam_error = "Spam is not permitted here"
            raise SynapseError(403, spam_error, Codes.FORBIDDEN)

        ev = await self.handle_new_client_event(
            requester=requester,
@@ -1006,7 +1001,7 @@ class EventCreationHandler:
        # after it is created
        if builder.internal_metadata.outlier:
            event.internal_metadata.outlier = True
            context = EventContext.for_outlier(self.storage)
            context = EventContext.for_outlier()
        elif (
            event.type == EventTypes.MSC2716_INSERTION
            and state_event_ids
@@ -1432,7 +1427,7 @@ class EventCreationHandler:
        # Validate a newly added alias or newly added alt_aliases.

        original_alias = None
        original_alt_aliases: object = []
        original_alt_aliases: List[str] = []

        original_event_id = event.unsigned.get("replaces_state")
        if original_event_id:
@@ -1460,7 +1455,6 @@ class EventCreationHandler:
            # If the old version of alt_aliases is of an unknown form,
            # completely replace it.
            if not isinstance(original_alt_aliases, (list, tuple)):
                # TODO: check that the original_alt_aliases' entries are all strings
                original_alt_aliases = []

        # Check that each alias is currently valid.

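Note: the alt_aliases handling above discards any old value of an unknown form, with a TODO about checking entry types. A standalone sketch that also implements the TODO'd string check — the helper name is invented:

from typing import List


def normalise_alt_aliases(value: object) -> List[str]:
    """Keep only a list/tuple of strings; anything else is replaced wholesale."""
    if not isinstance(value, (list, tuple)):
        return []
    return [alias for alias in value if isinstance(alias, str)]
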
@@ -448,7 +448,7 @@ class PaginationHandler:
            )
            # We expect `/messages` to use historic pagination tokens by default but
            # `/messages` should still works with live tokens when manually provided.
            assert from_token.room_key.topological is not None
            assert from_token.room_key.topological

        if pagin_config.limit is None:
            # This shouldn't happen as we've set a default limit before this

@@ -659,28 +659,27 @@ class PresenceHandler(BasePresenceHandler):
        )

        now = self.clock.time_msec()
        if self._presence_enabled:
            for state in self.user_to_current_state.values():
                self.wheel_timer.insert(
                    now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
                )
        for state in self.user_to_current_state.values():
            self.wheel_timer.insert(
                now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
            )
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
            )
            if self.is_mine_id(state.user_id):
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
                    then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
                )
            else:
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
                )
                if self.is_mine_id(state.user_id):
                    self.wheel_timer.insert(
                        now=now,
                        obj=state.user_id,
                        then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
                    )
                else:
                    self.wheel_timer.insert(
                        now=now,
                        obj=state.user_id,
                        then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
                    )

        # Set of users who have presence in the `user_to_current_state` that
        # have not yet been persisted
@@ -805,13 +804,6 @@ class PresenceHandler(BasePresenceHandler):
        This is currently used to bump the max presence stream ID without changing any
        user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
        """
        if not self._presence_enabled:
            # We shouldn't get here if presence is disabled, but we check anyway
            # to ensure that we don't a) send out presence federation and b)
            # don't add things to the wheel timer that will never be handled.
            logger.warning("Tried to update presence states when presence is disabled")
            return

        now = self.clock.time_msec()

        with Measure(self.clock, "presence_update_states"):
@@ -1237,10 +1229,6 @@ class PresenceHandler(BasePresenceHandler):
        ):
            raise SynapseError(400, "Invalid presence state")

        # If presence is disabled, no-op
        if not self.hs.config.server.use_presence:
            return

        user_id = target_user.to_string()

        prev_state = await self.current_state_for_user(user_id)

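Note: the presence hunks above schedule per-user timeouts through a wheel timer, which buckets (object, deadline) pairs so that expiry sweeps cost O(buckets) rather than O(users). A toy version for orientation only — the real implementation lives in synapse.util.wheel_timer and differs in detail:

from collections import defaultdict
from typing import Any, Dict, List


class ToyWheelTimer:
    def __init__(self, bucket_ms: int = 5000) -> None:
        self.bucket_ms = bucket_ms
        self.buckets: Dict[int, List[Any]] = defaultdict(list)

    def insert(self, now: int, obj: Any, then: int) -> None:
        # Clamp deadlines in the past to "now" so nothing is lost.
        self.buckets[max(then, now) // self.bucket_ms].append(obj)

    def fetch(self, now: int) -> List[Any]:
        # Pop every bucket whose window has started; entries fire with
        # bucket granularity, which is the whole point of the structure.
        expired: List[Any] = []
        for key in sorted(k for k in self.buckets if k * self.bucket_ms <= now):
            expired.extend(self.buckets.pop(key))
        return expired
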
@@ -14,7 +14,7 @@
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

from synapse.api.constants import ReceiptTypes
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -112,7 +112,7 @@ class ReceiptsHandler:
            )

            if not res:
                # res will be None if this receipt is 'old'
                # res will be None if this read receipt is 'old'
                continue

            stream_id, max_persisted_id = res
@@ -138,7 +138,7 @@ class ReceiptsHandler:
        return True

    async def received_client_receipt(
        self, room_id: str, receipt_type: str, user_id: str, event_id: str
        self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool
    ) -> None:
        """Called when a client tells us a local user has read up to the given
        event_id in the room.
@@ -148,14 +148,16 @@ class ReceiptsHandler:
            receipt_type=receipt_type,
            user_id=user_id,
            event_ids=[event_id],
            data={"ts": int(self.clock.time_msec())},
            data={"ts": int(self.clock.time_msec()), "hidden": hidden},
        )

        is_new = await self._handle_new_receipts([receipt])
        if not is_new:
            return

        if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE:
        if self.federation_sender and not (
            self.hs.config.experimental.msc2285_enabled and hidden
        ):
            await self.federation_sender.send_read_receipt(receipt)


@@ -165,37 +167,46 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
        self.config = hs.config

    @staticmethod
    def filter_out_private(events: List[JsonDict], user_id: str) -> List[JsonDict]:
        """
        This method takes in what is returned by
        get_linearized_receipts_for_rooms() and goes through read receipts
        filtering out m.read.private receipts if they were not sent by the
        current user.
        """

    def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
        visible_events = []

        # filter out private receipts the user shouldn't see
        # filter out hidden receipts the user shouldn't see
        for event in events:
            content = event.get("content", {})
            new_event = event.copy()
            new_event["content"] = {}

            for event_id, event_content in content.items():
                receipt_event = {}
                for receipt_type, receipt_content in event_content.items():
                    if receipt_type == ReceiptTypes.READ_PRIVATE:
                        user_rr = receipt_content.get(user_id, None)
                        if user_rr:
                            receipt_event[ReceiptTypes.READ_PRIVATE] = {
                                user_id: user_rr.copy()
                            }
                    else:
                        receipt_event[receipt_type] = receipt_content.copy()
            for event_id in content.keys():
                event_content = content.get(event_id, {})
                m_read = event_content.get(ReceiptTypes.READ, {})

                # Only include the receipt event if it is non-empty.
                if receipt_event:
                    new_event["content"][event_id] = receipt_event
                # If m_read is missing copy over the original event_content as there is nothing to process here
                if not m_read:
                    new_event["content"][event_id] = event_content.copy()
                    continue

                new_users = {}
                for rr_user_id, user_rr in m_read.items():
                    try:
                        hidden = user_rr.get("hidden")
                    except AttributeError:
                        # Due to https://github.com/matrix-org/synapse/issues/10376
                        # there are cases where user_rr is a string, in those cases
                        # we just ignore the read receipt
                        continue

                    if hidden is not True or rr_user_id == user_id:
                        new_users[rr_user_id] = user_rr.copy()
                        # If hidden has a value replace hidden with the correct prefixed key
                        if hidden is not None:
                            new_users[rr_user_id].pop("hidden")
                            new_users[rr_user_id][
                                ReadReceiptEventFields.MSC2285_HIDDEN
                            ] = hidden

                # Set new users unless empty
                if len(new_users.keys()) > 0:
                    new_event["content"][event_id] = {ReceiptTypes.READ: new_users}

            # Append new_event to visible_events unless empty
            if len(new_event["content"].keys()) > 0:
@@ -223,7 +234,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
        )

        if self.config.experimental.msc2285_enabled:
            events = ReceiptEventSource.filter_out_private(events, user.to_string())
            events = ReceiptEventSource.filter_out_hidden(events, user.to_string())

        return events, to_key

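Note: for orientation, this is the shape of the receipt events that `filter_out_private` (one side of the diff) and `filter_out_hidden` (the other) walk over — the event ID, user IDs, and timestamps here are invented:

receipt_event = {
    "type": "m.receipt",
    "content": {
        "$some_event_id": {
            "m.read": {"@alice:example.org": {"ts": 1650000000000}},
            "m.read.private": {"@bob:example.org": {"ts": 1650000001000}},
        },
    },
}
# filter_out_private(events, "@alice:example.org") would drop Bob's
# m.read.private entry: private receipts are visible only to their sender.
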
@@ -364,29 +364,21 @@ class RelationsHandler:
            The results may include additional events which are related to the
            requested events.
        """
        # De-duplicated events by ID to handle the same event requested multiple times.
        events_by_id = {}
        # De-duplicate events by ID to handle the same event requested multiple times.
        #
        # State events do not get bundled aggregations.
        events_by_id = {
            event.event_id: event for event in events if not event.is_state()
        }

        # A map of event ID to the relation in that event, if there is one.
        relations_by_id: Dict[str, str] = {}
        for event in events:
            # State events do not get bundled aggregations.
            if event.is_state():
                continue

        for event_id, event in events_by_id.items():
            relates_to = event.content.get("m.relates_to")
            relation_type = None
            if isinstance(relates_to, collections.abc.Mapping):
                relation_type = relates_to.get("rel_type")
                # An event which is a replacement (ie edit) or annotation (ie,
                # reaction) may not have any other event related to it.
                if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
                    continue

            # The event should get bundled aggregations.
            events_by_id[event.event_id] = event
            # Track the event's relation information for later.
            if isinstance(relation_type, str):
                relations_by_id[event.event_id] = relation_type
            if isinstance(relation_type, str):
                relations_by_id[event_id] = relation_type

        # event ID -> bundled aggregation in non-serialized form.
        results: Dict[str, BundledAggregations] = {}
@@ -421,6 +413,16 @@ class RelationsHandler:

        # Fetch other relations per event.
        for event in events_by_id.values():
            # An event which is a replacement (ie edit) or annotation (ie, reaction)
            # may not have any other event related to it.
            #
            # XXX This is buggy, see https://github.com/matrix-org/synapse/issues/12566
            if relations_by_id.get(event.event_id) in (
                RelationTypes.ANNOTATION,
                RelationTypes.REPLACE,
            ):
                continue

            # Fetch any annotations (ie, reactions) to bundle with this event.
            annotations = await self.get_annotations_for_event(
                event.event_id, event.room_id, ignored_users=ignored_users

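Note: the restructured de-duplication above boils down to one dict comprehension: state events are skipped and duplicate event IDs collapse to a single entry. A tiny illustration with a stand-in event type:

class FakeEvent:
    def __init__(self, event_id: str, state: bool = False) -> None:
        self.event_id = event_id
        self._state = state

    def is_state(self) -> bool:
        return self._state


events = [FakeEvent("$a"), FakeEvent("$a"), FakeEvent("$s", state=True), FakeEvent("$b")]
events_by_id = {ev.event_id: ev for ev in events if not ev.is_state()}
assert list(events_by_id) == ["$a", "$b"]  # deduplicated, state event dropped
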
@@ -33,7 +33,6 @@ from typing import (
import attr
from typing_extensions import TypedDict

import synapse
from synapse.api.constants import (
    EventContentFields,
    EventTypes,
@@ -58,7 +57,7 @@ from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.events.utils import copy_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.federation import get_domains_from_state
from synapse.handlers.relations import BundledAggregations
@@ -338,13 +337,13 @@ class RoomCreationHandler:
        # 50, but if the default PL in a room is 50 or more, then we set the
        # required PL above that.

        pl_content = copy_and_fixup_power_levels_contents(old_room_pl_state.content)
        users_default: int = pl_content.get("users_default", 0)  # type: ignore[assignment]
        pl_content = dict(old_room_pl_state.content)
        users_default = int(pl_content.get("users_default", 0))
        restricted_level = max(users_default + 1, 50)

        updated = False
        for v in ("invite", "events_default"):
            current: int = pl_content.get(v, 0)  # type: ignore[assignment]
            current = int(pl_content.get(v, 0))
            if current < restricted_level:
                logger.debug(
                    "Setting level for %s in %s to %i (was %i)",
@@ -381,9 +380,7 @@ class RoomCreationHandler:
                    "state_key": "",
                    "room_id": new_room_id,
                    "sender": requester.user.to_string(),
                    "content": copy_and_fixup_power_levels_contents(
                        old_room_pl_state.content
                    ),
                    "content": old_room_pl_state.content,
                },
                ratelimit=False,
            )
@@ -408,10 +405,9 @@ class RoomCreationHandler:
        """
        user_id = requester.user.to_string()

        spam_check = await self.spam_checker.user_may_create_room(user_id)
        if spam_check is not synapse.spam_checker_api.ALLOW:
        if not await self.spam_checker.user_may_create_room(user_id):
            raise SynapseError(
                403, "This room creation request has been rejected", spam_check
                403, "You are not permitted to create rooms", Codes.FORBIDDEN
            )

        creation_content: JsonDict = {
@@ -475,7 +471,7 @@ class RoomCreationHandler:
            # dict so we can't just copy.deepcopy it.
            initial_state[
                (EventTypes.PowerLevels, "")
            ] = power_levels = copy_and_fixup_power_levels_contents(
            ] = power_levels = copy_power_levels_contents(
                initial_state[(EventTypes.PowerLevels, "")]
            )

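Note: the upgrade-room power-level bump above reduces to one rule: the level required to send events in the old room becomes `max(users_default + 1, 50)`. Worked through:

def restricted_level(users_default: int) -> int:
    return max(users_default + 1, 50)


assert restricted_level(0) == 50    # ordinary rooms: moderators and up only
assert restricted_level(50) == 51   # default already 50: go one above it
assert restricted_level(75) == 76   # always strictly above the room default
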
@@ -18,7 +18,6 @@ import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

import synapse
from synapse import types
from synapse.api.constants import (
    AccountDataTypes,
@@ -680,6 +679,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            if target_id == self._server_notices_mxid:
                raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")

            block_invite = False

            if (
                self._server_notices_mxid is not None
                and requester.user.to_string() == self._server_notices_mxid
@@ -696,18 +697,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                        "Blocking invite: user is not admin and non-admin "
                        "invites disabled"
                    )
                    raise SynapseError(403, "Invites have been disabled on this server")
                    block_invite = True

            spam_check = await self.spam_checker.user_may_invite(
            if not await self.spam_checker.user_may_invite(
                requester.user.to_string(), target_id, room_id
            )
            if spam_check is not synapse.spam_checker_api.ALLOW:
            ):
                logger.info("Blocking invite due to spam checker")
                raise SynapseError(
                    403,
                    "This invite has been rejected as probable spam",
                    spam_check,
                )
                block_invite = True

            if block_invite:
                raise SynapseError(403, "Invites have been disabled on this server")

        # An empty prev_events list is allowed as long as the auth_event_ids are present
        if prev_event_ids is not None:
@@ -815,14 +814,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            # We assume that if the spam checker allowed the user to create
            # a room then they're allowed to join it.
            and not new_room
        ):
            spam_check = await self.spam_checker.user_may_join_room(
            and not await self.spam_checker.user_may_join_room(
                target.to_string(), room_id, is_invited=inviter is not None
            )
            if spam_check is not synapse.spam_checker_api.ALLOW:
                raise SynapseError(
                    403, "This request to join room has been rejected", spam_check
                )
        ):
            raise SynapseError(403, "Not allowed to join this room")

        # Check if a remote join should be performed.
        remote_join, remote_room_hosts = await self._should_perform_remote_join(
@@ -1376,14 +1372,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            )
        else:
            # Check if the spamchecker(s) allow this invite to go through.
            spam_check = await self.spam_checker.user_may_send_3pid_invite(
            if not await self.spam_checker.user_may_send_3pid_invite(
                inviter_userid=requester.user.to_string(),
                medium=medium,
                address=address,
                room_id=room_id,
            )
            if spam_check is not synapse.spam_checker_api.ALLOW:
                raise SynapseError(403, "Cannot send threepid invite", spam_check)
            ):
                raise SynapseError(403, "Cannot send threepid invite")

            stream_id = await self._make_and_store_3pid_invite(
                requester,

@@ -105,7 +105,6 @@ class RoomSummaryHandler:
            hs.get_clock(),
            "get_room_hierarchy",
        )
        self._msc3266_enabled = hs.config.experimental.msc3266_enabled

    async def get_room_hierarchy(
        self,
@@ -631,7 +630,7 @@ class RoomSummaryHandler:
        return False

    async def _is_remote_room_accessible(
        self, requester: Optional[str], room_id: str, room: JsonDict
        self, requester: str, room_id: str, room: JsonDict
    ) -> bool:
        """
        Calculate whether the room received over federation should be shown to the requester.
@@ -646,8 +645,7 @@ class RoomSummaryHandler:
            due to an invite, etc.

        Args:
            requester: The user requesting the summary. If not passed only world
                readability is checked.
            requester: The user requesting the summary.
            room_id: The room ID returned over federation.
            room: The summary of the room returned over federation.

@@ -661,8 +659,6 @@ class RoomSummaryHandler:
            or room.get("world_readable") is True
        ):
            return True
        elif not requester:
            return False

        # Check if the user is a member of any of the allowed rooms from the response.
        allowed_rooms = room.get("allowed_room_ids")
@@ -719,10 +715,6 @@ class RoomSummaryHandler:
            "room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
        }

        if self._msc3266_enabled:
            entry["im.nheko.summary.version"] = stats["version"]
            entry["im.nheko.summary.encryption"] = stats["encryption"]

        # Federation requests need to provide additional information so the
        # requested server is able to filter the response appropriately.
        if for_federation:
@@ -820,45 +812,9 @@ class RoomSummaryHandler:

            room_summary["membership"] = membership or "leave"
        else:
            # Reuse the hierarchy query over federation
            if remote_room_hosts is None:
                raise SynapseError(400, "Missing via to query remote room")

            (
                room_entry,
                children_room_entries,
                inaccessible_children,
            ) = await self._summarize_remote_room_hierarchy(
                _RoomQueueEntry(room_id, remote_room_hosts),
                suggested_only=True,
            )

            # The results over federation might include rooms that we, as the
            # requesting server, are allowed to see, but the requesting user is
            # not permitted to see.
            #
            # Filter the returned results to only what is accessible to the user.
            if not room_entry or not await self._is_remote_room_accessible(
                requester, room_entry.room_id, room_entry.room
            ):
                raise NotFoundError("Room not found or is not accessible")

            room = dict(room_entry.room)
            room.pop("allowed_room_ids", None)

            # If there was a requester, add their membership.
            # We keep the membership in the local membership table unless the
            # room is purged even for remote rooms.
            if requester:
                (
                    membership,
                    _,
                ) = await self._store.get_local_current_membership_for_user_in_room(
                    requester, room_id
                )
                room["membership"] = membership or "leave"

            return room
            # TODO federation API, descoped from initial unstable implementation
            # as MSC needs more maturing on that side.
            raise SynapseError(400, "Federation is not currently supported.")

        return room_summary

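Note: the `_is_remote_room_accessible` change makes the requester optional, so that only world-readability is checked when no user is given. A standalone sketch using just the fields visible in the hunks (`world_readable`, `allowed_room_ids`); the join-rules condition elided by the diff context is omitted, and the membership set is passed in rather than queried:

from typing import Optional, Set


def is_remote_room_accessible(
    requester: Optional[str], room: dict, joined_room_ids: Set[str]
) -> bool:
    if room.get("world_readable") is True:
        return True
    if not requester:
        return False  # without a user, only world-readable rooms pass
    allowed = room.get("allowed_room_ids") or []
    return any(room_id in joined_room_ids for room_id in allowed)
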
@@ -468,7 +468,7 @@ class SsoHandler:
                auth_provider_id,
                remote_user_id,
                get_request_user_agent(request),
                request.getClientAddress().host,
                request.getClientIP(),
            )
            new_user = True
        elif self._sso_update_profile_information:
@@ -928,7 +928,7 @@ class SsoHandler:
            session.auth_provider_id,
            session.remote_user_id,
            get_request_user_agent(request),
            request.getClientAddress().host,
            request.getClientIP(),
        )

        logger.info(

@@ -1045,7 +1045,7 @@ class SyncHandler:
        last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
            user_id=sync_config.user.to_string(),
            room_id=room_id,
            receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
            receipt_type=ReceiptTypes.READ,
        )

        return await self.store.get_unread_event_push_actions_by_room_for_user(

@@ -258,7 +258,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
        self.hs = hs
        self._enabled = bool(
            hs.config.registration.registration_requires_token
        ) or bool(hs.config.registration.enable_registration_token_3pid_bypass)
        ) or bool(hs.config.registration.enable_registration_token_3pid_bypasss)
        self.store = hs.get_datastores().main

    def is_enabled(self) -> bool:

@@ -60,7 +60,7 @@ class UserDirectoryHandler(StateDeltasHandler):
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.update_user_directory = hs.config.worker.should_update_user_directory
        self.update_user_directory = hs.config.server.update_user_directory
        self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
        self.spam_checker = hs.get_spam_checker()
        # The current position in the current_state_delta stream
@@ -100,8 +100,7 @@ class UserDirectoryHandler(StateDeltasHandler):
        # Remove any spammy users from the results.
        non_spammy_users = []
        for user in results["results"]:
            spam_check = await self.spam_checker.check_username_for_spam(user)
            if spam_check is synapse.spam_checker_api.ALLOW:
            if not await self.spam_checker.check_username_for_spam(user):
                non_spammy_users.append(user)
        results["results"] = non_spammy_users