Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: rei/worker...anoa/modul (7 commits)
| SHA1 |
|---|
| 3dcc1efc43 |
| 46c0ab559b |
| e8cdfc771b |
| 1b30b82ac6 |
| 266f426c50 |
| c3c3c6d200 |
| 9cd8fecdc5 |
@@ -35,9 +35,9 @@ sed -i \
# compatible (as far the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies and the dev-docs
# group from the toml file. This means we don't have to ensure compatibility between
# old deps and dev tools.
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.

pip install toml wheel

@@ -47,7 +47,6 @@ with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())

del data['tool']['poetry']['dev-dependencies']
del data['tool']['poetry']['group']['dev-docs']

with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
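As a quick aside, the following is a minimal sanity check (not part of the diff) that could be run after the step above to confirm the dev groups really were dropped; it assumes the same `toml` package the snippet already installs.

```python
# Illustrative only -- not taken from the repository.
import toml

data = toml.load("pyproject.toml")
poetry = data["tool"]["poetry"]
assert "dev-dependencies" not in poetry
assert "dev-docs" not in poetry.get("group", {})
print("dev dependencies and the dev-docs group have been removed")
```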
12 .github/workflows/docker.yml vendored
@@ -10,7 +10,6 @@ on:

permissions:
contents: read
packages: write

jobs:
build:
@@ -35,20 +34,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: |
docker.io/matrixdotorg/synapse
ghcr.io/matrix-org/synapse
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
79 .github/workflows/docs.yaml vendored
@@ -13,10 +13,25 @@ on:
workflow_dispatch:

jobs:
pre:
name: Calculate variables for GitHub Pages deployment
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3

- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'

- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html

# Figure out the target directory.
#
# The target directory depends on the name of the branch
@@ -40,65 +55,11 @@ jobs:

# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
outputs:
branch-version: ${{ steps.vars.outputs.branch-version }}

################################################################################
pages-docs:
name: GitHub Pages
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3

- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'

- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html

# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ needs.pre.outputs.branch-version }}

################################################################################
pages-devdocs:
name: GitHub Pages (developer docs)
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3

- name: "Set up Sphinx"
uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
groups: "dev-docs"
extras: ""

- name: Build the documentation
run: |
cd dev-docs
poetry run make html

# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./dev-docs/_build/html
destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
destination_dir: ./${{ steps.vars.outputs.branch-version }}
6 .github/workflows/latest_deps.yml vendored
@@ -27,7 +27,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -61,7 +61,7 @@ jobs:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
4 .github/workflows/release-artifacts.yml vendored
@@ -4,15 +4,13 @@ name: Build release artifacts

on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs). PRs skip
# building wheels on macOS & ARM.
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]

# we do the full build on tags.
tags: ["v*"]
merge_group:
workflow_dispatch:

concurrency:
35 .github/workflows/tests.yml vendored
@@ -4,7 +4,6 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
merge_group:
workflow_dispatch:

concurrency:
@@ -34,14 +33,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -103,14 +94,6 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
@@ -129,7 +112,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: clippy
@@ -151,7 +134,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
components: clippy
@@ -171,7 +154,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
# We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01
@@ -239,7 +222,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -284,7 +267,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -405,7 +388,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -550,7 +533,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -581,7 +564,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -604,7 +587,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2
15 .github/workflows/twisted_trunk.yml vendored
@@ -5,13 +5,6 @@ on:
- cron: 0 8 * * *

workflow_dispatch:
inputs:
twisted_ref:
description: Commit, branch or tag to checkout from upstream Twisted.
required: false
default: 'trunk'
type: string

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +18,7 @@ jobs:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -36,7 +29,7 @@ jobs:
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
@@ -50,7 +43,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1

- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -89,7 +82,7 @@ jobs:
- uses: actions/checkout@v3

- name: Install Rust
uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
3 .gitignore vendored
@@ -53,7 +53,6 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
/dev-docs/_build/
/htmlcov
/pip-wheel-metadata/

@@ -62,7 +61,7 @@ book/

# complement
/complement-*
/main.tar.gz
/master.tar.gz

# rust
/target/
192 CHANGES.md
@@ -1,191 +1,3 @@
Synapse 1.81.0rc1 (2023-04-04)
==============================

Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.

Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.


Features
--------

- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
- Allow loading `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))


Bugfixes
--------

- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
- Add a check to [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
to ensure that the sqlite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
- Fix a long-standing bug that Synpase only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
- Fix bug in worker mode where on a rolling restart of workers the "typing" worker would consume 100% CPU until it got restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
- Fix a long-standing bug where some to_device messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
- Fix missing app variable in mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))


Improved Documentation
----------------------

- Fix a typo in login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))


Internal Changes
----------------

- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
- Fix `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
- Speed up pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
- Speed up sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
- Fix copyright year in SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))


Synapse 1.80.0 (2023-03-28)
===========================

No significant changes since 1.80.0rc2.


Synapse 1.80.0rc2 (2023-03-22)
==============================

Bugfixes
--------

- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))


Synapse 1.80.0rc1 (2023-03-21)
==============================

Features
--------

- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
- Allow loading `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))


Bugfixes
--------

- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
- Fix a long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))


Updates to the Docker image
---------------------------

- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))


Improved Documentation
----------------------

- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))


Internal Changes
----------------

- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
- Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
- Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))


Synapse 1.79.0 (2023-03-14)
===========================

No significant changes since 1.79.0rc2.


Synapse 1.79.0rc2 (2023-03-13)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))


Internal Changes
----------------

- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))


Synapse 1.79.0rc1 (2023-03-07)
==============================

@@ -235,7 +47,7 @@ Improved Documentation
Deprecations and Removals
-------------------------

- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
@@ -476,7 +288,7 @@ Those who are `poetry install`ing from source using our lockfile should ensure t
Notes on faster joins
---------------------

The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.

After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
49 Cargo.lock generated
@@ -13,9 +13,9 @@ dependencies = [

[[package]]
name = "anyhow"
version = "1.0.70"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"

[[package]]
name = "arc-swap"
@@ -185,9 +185,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
version = "1.0.52"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
@@ -250,7 +250,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 1.0.104",
"syn",
]

[[package]]
@@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.104",
"syn",
]

[[package]]
@@ -276,9 +276,9 @@ dependencies = [

[[package]]
name = "quote"
version = "1.0.26"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
@@ -294,9 +294,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.7.3"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
dependencies = [
"aho-corasick",
"memchr",
@@ -305,9 +305,9 @@ dependencies = [

[[package]]
name = "regex-syntax"
version = "0.6.29"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"

[[package]]
name = "ryu"
@@ -323,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
version = "1.0.159"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
dependencies = [
"serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.159"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.10",
"syn",
]

[[package]]
name = "serde_json"
version = "1.0.95"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744"
checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
dependencies = [
"itoa",
"ryu",
@@ -375,17 +375,6 @@ dependencies = [
"unicode-ident",
]

[[package]]
name = "syn"
version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]

[[package]]
name = "synapse"
version = "0.1.0"
@@ -1 +0,0 @@
Delete server-side backup keys when deactivating an account.
1 changelog.d/15187.feature Normal file
@@ -0,0 +1 @@
Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition.
1 changelog.d/15190.bugfix Normal file
@@ -0,0 +1 @@
Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules.
1 changelog.d/15195.misc Normal file
@@ -0,0 +1 @@
Improve performance of creating and authenticating events.
1 changelog.d/15200.misc Normal file
@@ -0,0 +1 @@
Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key.
1 changelog.d/15223.doc Normal file
@@ -0,0 +1 @@
Add a missing endpoint to the workers documentation.
36 debian/changelog vendored
@@ -1,39 +1,3 @@
matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium

* New Synapse release 1.81.0rc1.

-- Synapse Packaging team <packages@matrix.org> Tue, 04 Apr 2023 14:29:03 +0100

matrix-synapse-py3 (1.80.0) stable; urgency=medium

* New Synapse release 1.80.0.

-- Synapse Packaging team <packages@matrix.org> Tue, 28 Mar 2023 11:10:33 +0100

matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium

* New Synapse release 1.80.0rc2.

-- Synapse Packaging team <packages@matrix.org> Wed, 22 Mar 2023 08:30:16 -0700

matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium

* New Synapse release 1.80.0rc1.

-- Synapse Packaging team <packages@matrix.org> Tue, 21 Mar 2023 10:56:08 -0700

matrix-synapse-py3 (1.79.0) stable; urgency=medium

* New Synapse release 1.79.0.

-- Synapse Packaging team <packages@matrix.org> Tue, 14 Mar 2023 16:14:50 +0100

matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium

* New Synapse release 1.79.0rc2.

-- Synapse Packaging team <packages@matrix.org> Mon, 13 Mar 2023 12:54:21 +0000

matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium

* New Synapse release 1.79.0rc1.
@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -1,50 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "Synapse development"
copyright = "2023, The Matrix.org Foundation C.I.C."
author = "The Synapse Maintainers and Community"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
"autodoc2",
"myst_parser",
]

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]


# -- Options for Autodoc2 ----------------------------------------------------

autodoc2_docstring_parser_regexes = [
# this will render all docstrings as 'MyST' Markdown
(r".*", "myst"),
]

autodoc2_packages = [
{
"path": "../synapse",
# Don't render documentation for everything as a matter of course
"auto_mode": False,
},
]


# -- Options for MyST (Markdown) ---------------------------------------------

# myst_heading_anchors = 2


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "furo"
html_static_path = ["_static"]
@@ -1,22 +0,0 @@
.. Synapse Developer Documentation documentation master file, created by
sphinx-quickstart on Mon Mar 13 08:59:51 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.

Welcome to the Synapse Developer Documentation!
===========================================================

.. toctree::
:maxdepth: 2
:caption: Contents:

modules/federation_sender


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
@@ -1,5 +0,0 @@
Federation Sender
=================

```{autodoc2-docstring} synapse.federation.sender
```
@@ -37,24 +37,9 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential curl git libffi-dev libssl-dev \
build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*

# Install rust and ensure its in the PATH.
# (Rust may be needed to compile `cryptography`---which is one of poetry's
# dependencies---on platforms that don't have a `cryptography` wheel.
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal

# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI

# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
@@ -51,7 +51,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
export SYNAPSE_WORKER_TYPES="\
event_persister:2, \
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
@@ -63,8 +64,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
synchrotron, \
client_reader, \
appservice, \
pusher, \
stream_writers=account_data+presence+receipts+to_device+typing"
pusher"

fi
log "Workers requested: $SYNAPSE_WORKER_TYPES"
@@ -19,15 +19,8 @@
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
# below. Leave empty for no workers. Add a ':' and a number at the end to
# multiply that worker. Append multiple worker types with '+' to merge the
# worker types into a single worker. Add a name and a '=' to the front of a
# worker type to give this instance a name in logs and nginx.
# Examples:
# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
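To make the SYNAPSE_WORKER_TYPES syntax described above concrete, here is a small, self-contained sketch; it is not the script's actual parsing code, and the expansion rules (comma-separated entries, an optional "name=" prefix, "+"-merged types, a ":" multiplier) are taken from the docstring shown in the hunk.

```python
# Illustrative sketch only -- not the script's implementation.
def expand_worker_types(spec: str):
    expanded = []
    for entry in (e.strip() for e in spec.split(",") if e.strip()):
        name, _, entry = entry.rpartition("=")   # optional instance name ("" if absent)
        entry, _, count = entry.partition(":")   # optional multiplier (defaults to 1)
        types = entry.split("+")                 # "+"-merged worker types
        for _ in range(int(count) if count else 1):
            expanded.append((name or None, types))
    return expanded

# Using one of the docstring's own examples:
print(expand_worker_types("event_persister:2, federation_sender:2, client_reader"))
# [(None, ['event_persister']), (None, ['event_persister']),
#  (None, ['federation_sender']), (None, ['federation_sender']),
#  (None, ['client_reader'])]
```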
@@ -47,33 +40,16 @@

import os
import platform
import re
import subprocess
import sys
from collections import defaultdict
from itertools import chain
from pathlib import Path
from typing import (
Any,
Dict,
List,
Mapping,
MutableMapping,
NoReturn,
Optional,
Set,
SupportsIndex,
)
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set

import yaml
from jinja2 import Environment, FileSystemLoader

MAIN_PROCESS_HTTP_LISTENER_PORT = 8080

# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
WORKER_PLACEHOLDER_NAME = "placeholder_name"

# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
# Watching /_matrix/federation needs a "federation" listener
@@ -94,13 +70,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
"shared_extra_conf": {
"update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
"worker_extra_conf": "",
},
"media_repository": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.media_repository",
"listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
@@ -113,7 +87,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# The first configured media worker will run the media background jobs
"shared_extra_conf": {
"enable_media_repo": False,
"media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
"media_instance_running_background_jobs": "media_repository1",
},
"worker_extra_conf": "enable_media_repo: true",
},
@@ -121,9 +95,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {
"notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -163,7 +135,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
"^/_matrix/client/(r0|v3|unstable)/register/available$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -172,7 +143,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
"^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -222,9 +192,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
# This worker cannot be sharded. Therefore, there should only ever be one
# background worker. This is enforced for the safety of your database.
"shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
# This worker cannot be sharded. Therefore there should only ever be one background
# worker, and it should be named background_worker1
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
"worker_extra_conf": "",
},
"event_creator": {
@@ -305,7 +275,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
"""

NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
upstream {upstream_worker_type} {{
{body}
}}
"""
@@ -356,7 +326,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:

def add_worker_roles_to_shared_config(
shared_config: dict,
worker_types_set: Set[str],
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
@@ -364,36 +334,22 @@ def add_worker_roles_to_shared_config(
append appropriate worker information to it for the current worker_type instance.

Args:
shared_config: The config dict that all worker instances share (after being
converted to YAML)
worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
This list can be a single worker type or multiple.
shared_config: The config dict that all worker instances share (after being converted to YAML)
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
worker_name: The name of the worker instance.
worker_port: The HTTP replication port that the worker instance is listening on.
"""
# The instance_map config field marks the workers that write to various replication
# streams
# The instance_map config field marks the workers that write to various replication streams
instance_map = shared_config.setdefault("instance_map", {})

# This is a list of the stream_writers that there can be only one of. Events can be
# sharded, and therefore doesn't belong here.
singular_stream_writers = [
"account_data",
"presence",
"receipts",
"to_device",
"typing",
]

# Worker-type specific sharding config. Now a single worker can fulfill multiple
# roles, check each.
if "pusher" in worker_types_set:
# Worker-type specific sharding config
if worker_type == "pusher":
shared_config.setdefault("pusher_instances", []).append(worker_name)

if "federation_sender" in worker_types_set:
elif worker_type == "federation_sender":
shared_config.setdefault("federation_sender_instances", []).append(worker_name)

if "event_persister" in worker_types_set:
elif worker_type == "event_persister":
# Event persisters write to the events stream, so we need to update
# the list of event stream writers
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -406,154 +362,19 @@ def add_worker_roles_to_shared_config(
"port": worker_port,
}

# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
for worker in worker_types_set:
if worker in singular_stream_writers:
shared_config.setdefault("stream_writers", {}).setdefault(
worker, []
).append(worker_name)
elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
# Update the list of stream writers
# It's convenient that the name of the worker type is the same as the stream to write
shared_config.setdefault("stream_writers", {}).setdefault(
worker_type, []
).append(worker_name)

# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
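For illustration, a minimal sketch (not taken from the diff) of the shape of shared_config after registering a single stream writer via the setdefault calls shown above; the worker name "typing1" and port 18015 are made-up values.

```python
# Hypothetical worker name and port, following the logic above.
shared_config: dict = {}
worker_name, worker_port = "typing1", 18015

# A "typing" stream writer is recorded under stream_writers...
shared_config.setdefault("stream_writers", {}).setdefault("typing", []).append(worker_name)
# ...and every stream writer gets an HTTP replication entry in instance_map.
shared_config.setdefault("instance_map", {})[worker_name] = {
    "host": "localhost",
    "port": worker_port,
}

assert shared_config == {
    "stream_writers": {"typing": ["typing1"]},
    "instance_map": {"typing1": {"host": "localhost", "port": 18015}},
}
```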
def merge_worker_template_configs(
|
||||
existing_dict: Optional[Dict[str, Any]],
|
||||
to_be_merged_dict: Dict[str, Any],
|
||||
) -> Dict[str, Any]:
|
||||
"""When given an existing dict of worker template configuration consisting with both
|
||||
dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
|
||||
return new dict.
|
||||
|
||||
Args:
|
||||
existing_dict: Either an existing worker template or a fresh blank one.
|
||||
to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
|
||||
existing_dict.
|
||||
Returns: The newly merged together dict values.
|
||||
"""
|
||||
new_dict: Dict[str, Any] = {}
|
||||
if not existing_dict:
|
||||
# It doesn't exist yet, just use the new dict(but take a copy not a reference)
|
||||
new_dict = to_be_merged_dict.copy()
|
||||
else:
|
||||
for i in to_be_merged_dict.keys():
|
||||
if (i == "endpoint_patterns") or (i == "listener_resources"):
|
||||
# merge the two lists, remove duplicates
|
||||
new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
|
||||
elif i == "shared_extra_conf":
|
||||
# merge dictionary's, the worker name will be replaced later
|
||||
new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
|
||||
elif i == "worker_extra_conf":
|
||||
# There is only one worker type that has a 'worker_extra_conf' and it is
|
||||
# the media_repo. Since duplicate worker types on the same worker don't
|
||||
# work, this is fine.
|
||||
new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
|
||||
else:
|
||||
# Everything else should be identical, like "app", which only works
|
||||
# because all apps are now generic_workers.
|
||||
new_dict[i] = to_be_merged_dict[i]
|
||||
return new_dict
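# Illustrative note (not part of the upstream script): given two templates in the
# WORKERS_CONFIG shape, the merge above unions the list-valued keys and overlays the
# dict-valued ones. With hypothetical values:
#
#   merge_worker_template_configs(
#       {"listener_resources": ["client"], "endpoint_patterns": ["/a"],
#        "shared_extra_conf": {}, "worker_extra_conf": ""},
#       {"listener_resources": ["client", "replication"], "endpoint_patterns": ["/b"],
#        "shared_extra_conf": {}, "worker_extra_conf": ""},
#   )
#   # -> "listener_resources" holds "client" and "replication" (deduplicated, order not
#   #    guaranteed because of the set()), "endpoint_patterns" holds "/a" and "/b".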
|
||||
|
||||
|
||||
def insert_worker_name_for_worker_config(
|
||||
existing_dict: Dict[str, Any], worker_name: str
|
||||
) -> Dict[str, Any]:
|
||||
"""Insert a given worker name into the worker's configuration dict.
|
||||
|
||||
Args:
|
||||
existing_dict: The worker_config dict that is imported into shared_config.
|
||||
worker_name: The name of the worker to insert.
|
||||
Returns: Copy of the dict with newly inserted worker name
|
||||
"""
|
||||
dict_to_edit = existing_dict.copy()
|
||||
for k, v in dict_to_edit["shared_extra_conf"].items():
|
||||
# Only proceed if it's the placeholder name string
|
||||
if v == WORKER_PLACEHOLDER_NAME:
|
||||
dict_to_edit["shared_extra_conf"][k] = worker_name
|
||||
return dict_to_edit
|
||||
|
||||
|
||||
def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
|
||||
"""
|
||||
Apply multiplier(if found) by returning a new expanded list with some basic error
|
||||
checking.
|
||||
|
||||
Args:
|
||||
worker_types: The unprocessed List of requested workers
|
||||
Returns:
|
||||
A new list with all requested workers expanded.
|
||||
"""
|
||||
# Checking performed:
|
||||
# 1. if worker:2 or more is declared, it will create additional workers up to number
|
||||
# 2. if worker:1, it will create a single copy of this worker as if no number was
|
||||
# given
|
||||
# 3. if worker:0 is declared, this worker will be ignored. This is to allow for
|
||||
# scripting and automated expansion and is intended behaviour.
|
||||
# 4. if worker:NaN or is a negative number, it will error and log it.
|
||||
new_worker_types = []
|
||||
for worker_type in worker_types:
|
||||
if ":" in worker_type:
|
||||
worker_type_components = split_and_strip_string(worker_type, ":", 1)
|
||||
worker_count = 0
|
||||
# Should only be 2 components, a type of worker(s) and an integer as a
|
||||
# string. Cast the number as an int then it can be used as a counter.
|
||||
try:
|
||||
worker_count = int(worker_type_components[1])
|
||||
except ValueError:
|
||||
error(
|
||||
f"Bad number in worker count for '{worker_type}': "
|
||||
f"'{worker_type_components[1]}' is not an integer"
|
||||
)
|
||||
|
||||
# As long as there are more than 0, we add one to the list to make below.
|
||||
for _ in range(worker_count):
|
||||
new_worker_types.append(worker_type_components[0])
|
||||
|
||||
else:
|
||||
# If it's not a real worker_type, it will error out later.
|
||||
new_worker_types.append(worker_type)
|
||||
return new_worker_types
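# Illustrative note (not part of the upstream script): the multiplier syntax expands
# before any name handling. With a hypothetical input:
#
#   apply_requested_multiplier_for_worker(["federation_sender:2", "pusher:0", "synchrotron"])
#   # -> ["federation_sender", "federation_sender", "synchrotron"]
#   # "pusher:0" is dropped entirely, which is intended for scripted/automated setups.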
|
||||
|
||||
|
||||
def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
|
||||
"""Helper to check to make sure worker types that cannot have multiples do not.
|
||||
|
||||
Args:
|
||||
worker_type: The type of worker to check against.
|
||||
Returns: True if allowed, False if not
|
||||
"""
|
||||
return worker_type not in [
|
||||
"background_worker",
|
||||
"account_data",
|
||||
"presence",
|
||||
"receipts",
|
||||
"typing",
|
||||
"to_device",
|
||||
]
|
||||
|
||||
|
||||
def split_and_strip_string(
|
||||
given_string: str, split_char: str, max_split: SupportsIndex = -1
|
||||
) -> List[str]:
|
||||
"""
|
||||
Helper to split a string on split_char and strip whitespace from each end of each
|
||||
element.
|
||||
Args:
|
||||
given_string: The string to split
|
||||
split_char: The character to split the string on
|
||||
max_split: kwarg for split() to limit how many times the split() happens
|
||||
Returns:
|
||||
A List of strings
|
||||
"""
|
||||
# Removes whitespace from ends of result strings before adding to list. Allow for
|
||||
# overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
|
||||
return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
|
||||
# Map of stream writer instance names to host/ports combos
|
||||
# For now, all stream writers need http replication ports
|
||||
instance_map[worker_name] = {
|
||||
"host": "localhost",
|
||||
"port": worker_port,
|
||||
}
|
||||
|
||||
|
||||
def generate_base_homeserver_config() -> None:
|
||||
@@ -568,153 +389,29 @@ def generate_base_homeserver_config() -> None:
|
||||
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
|
||||
|
||||
|
||||
def parse_worker_types(
|
||||
requested_worker_types: List[str],
|
||||
) -> Dict[str, Set[str]]:
|
||||
"""Read the desired list of requested workers and prepare the data for use in
|
||||
generating worker config files while also checking for potential gotchas.
|
||||
|
||||
Args:
|
||||
requested_worker_types: The list formed from the split environment variable
|
||||
containing the unprocessed requests for workers.
|
||||
|
||||
Returns: A dict of worker names to set of worker types. Format:
|
||||
{'worker_name':
|
||||
{'worker_type', 'worker_type2'}
|
||||
}
|
||||
"""
|
||||
# A counter of worker_base_name -> int. Used for determining the name for a given
|
||||
# worker when generating its config file, as each worker's name is just
|
||||
# worker_base_name followed by instance number
|
||||
worker_base_name_counter: Dict[str, int] = defaultdict(int)
|
||||
|
||||
# Similar to above, but more finely grained. This is used to determine we don't have
|
||||
# more than a single worker for cases where multiples would be bad(e.g. presence).
|
||||
worker_type_shard_counter: Dict[str, int] = defaultdict(int)
|
||||
|
||||
# The final result of all this processing
|
||||
dict_to_return: Dict[str, Set[str]] = {}
|
||||
|
||||
# Handle any multipliers requested for given workers.
|
||||
multiple_processed_worker_types = apply_requested_multiplier_for_worker(
|
||||
requested_worker_types
|
||||
)
|
||||
|
||||
# Process each worker_type_string
|
||||
# Examples of expected formats:
|
||||
# - requested_name=type1+type2+type3
|
||||
# - synchrotron
|
||||
# - event_creator+event_persister
|
||||
for worker_type_string in multiple_processed_worker_types:
|
||||
# First, if a name is requested, use that — otherwise generate one.
|
||||
worker_base_name: str = ""
|
||||
if "=" in worker_type_string:
|
||||
# Split on "=", remove extra whitespace from ends then make list
|
||||
worker_type_split = split_and_strip_string(worker_type_string, "=")
|
||||
if len(worker_type_split) > 2:
|
||||
error(
|
||||
"There should only be one '=' in the worker type string. "
|
||||
f"Please fix: {worker_type_string}"
|
||||
)
|
||||
|
||||
# Assign the name
|
||||
worker_base_name = worker_type_split[0]
|
||||
|
||||
if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
|
||||
# Apply a fairly narrow regex to the worker names. Some characters
|
||||
# aren't safe for use in file paths or nginx configurations.
|
||||
# Don't allow to end with a number because we'll add a number
|
||||
# ourselves in a moment.
|
||||
error(
|
||||
"Invalid worker name; please choose a name consisting of "
|
||||
"alphanumeric letters, _ + -, but not ending with a digit: "
|
||||
f"{worker_base_name!r}"
|
||||
)
|
||||
|
||||
# Continue processing the remainder of the worker_type string
|
||||
# with the name override removed.
|
||||
worker_type_string = worker_type_split[1]
|
||||
|
||||
# Split the worker_type_string on "+", remove whitespace from ends then make
|
||||
# the list a set so it's deduplicated.
|
||||
worker_types_set: Set[str] = set(
|
||||
split_and_strip_string(worker_type_string, "+")
|
||||
)
|
||||
|
||||
if not worker_base_name:
|
||||
# No base name specified: generate one deterministically from set of
|
||||
# types
|
||||
worker_base_name = "+".join(sorted(worker_types_set))
|
||||
|
||||
# At this point, we have:
|
||||
# worker_base_name which is the name for the worker, without counter.
|
||||
# worker_types_set which is the set of worker types for this worker.
|
||||
|
||||
# Validate worker_type and make sure we don't allow sharding for a worker type
|
||||
# that doesn't support it. Will error and stop if it is a problem,
|
||||
# e.g. 'background_worker'.
|
||||
for worker_type in worker_types_set:
|
||||
# Verify this is a real defined worker type. If it's not, stop everything so
|
||||
# it can be fixed.
|
||||
if worker_type not in WORKERS_CONFIG:
|
||||
error(
|
||||
f"{worker_type} is an unknown worker type! Was found in "
|
||||
f"'{worker_type_string}'. Please fix!"
|
||||
)
|
||||
|
||||
if worker_type in worker_type_shard_counter:
|
||||
if not is_sharding_allowed_for_worker_type(worker_type):
|
||||
error(
|
||||
f"There can be only a single worker with {worker_type} "
|
||||
"type. Please recount and remove."
|
||||
)
|
||||
# Not in shard counter, must not have seen it yet, add it.
|
||||
worker_type_shard_counter[worker_type] += 1
|
||||
|
||||
# Generate the number for the worker using incrementing counter
|
||||
worker_base_name_counter[worker_base_name] += 1
|
||||
worker_number = worker_base_name_counter[worker_base_name]
|
||||
worker_name = f"{worker_base_name}{worker_number}"
|
||||
|
||||
if worker_number > 1:
|
||||
# If this isn't the first worker, check that we don't have a confusing
|
||||
# mixture of worker types with the same base name.
|
||||
first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
|
||||
if first_worker_with_base_name != worker_types_set:
|
||||
error(
|
||||
f"Can not use worker_name: '{worker_name}' for worker_type(s): "
|
||||
f"{worker_types_set!r}. It is already in use by "
|
||||
f"worker_type(s): {first_worker_with_base_name!r}"
|
||||
)
|
||||
|
||||
dict_to_return[worker_name] = worker_types_set
|
||||
|
||||
return dict_to_return
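# Illustrative note (not part of the upstream script): with a hypothetical
# SYNAPSE_WORKER_TYPES of "event_persister:2, rich=client_reader+synchrotron", the
# function above would return something like
#
#   {
#       "event_persister1": {"event_persister"},
#       "event_persister2": {"event_persister"},
#       "rich1": {"client_reader", "synchrotron"},
#   }
#
# i.e. each key is the base name plus an incrementing counter, and each value is the
# deduplicated set of worker types that instance should fulfil.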
|
||||
|
||||
|
||||
def generate_worker_files(
|
||||
environ: Mapping[str, str],
|
||||
config_path: str,
|
||||
data_dir: str,
|
||||
requested_worker_types: Dict[str, Set[str]],
|
||||
environ: Mapping[str, str], config_path: str, data_dir: str
|
||||
) -> None:
|
||||
"""Read the desired workers(if any) that is passed in and generate shared
|
||||
homeserver, nginx and supervisord configs.
|
||||
"""Read the desired list of workers from environment variables and generate
|
||||
shared homeserver, nginx and supervisord configs.
|
||||
|
||||
Args:
|
||||
environ: os.environ instance.
|
||||
config_path: The location of the generated Synapse main worker config file.
|
||||
data_dir: The location of the synapse data directory. Where log and
|
||||
user-facing config files live.
|
||||
requested_worker_types: A Dict containing requested workers in the format of
|
||||
{'worker_name1': {'worker_type', ...}}
|
||||
"""
|
||||
# Note that yaml cares about indentation, so care should be taken to insert lines
|
||||
# into files at the correct indentation below.
|
||||
|
||||
# First read the original config file and extract the listeners block. Then we'll
|
||||
# add another listener for replication. Later we'll write out the result to the
|
||||
# shared config file.
|
||||
# shared_config is the contents of a Synapse config file that will be shared amongst
|
||||
# the main Synapse process as well as all workers.
|
||||
# It is intended mainly for disabling functionality when certain workers are spun up,
|
||||
# and adding a replication listener.
|
||||
|
||||
# First read the original config file and extract the listeners block. Then we'll add
|
||||
# another listener for replication. Later we'll write out the result to the shared
|
||||
# config file.
|
||||
listeners = [
|
||||
{
|
||||
"port": 9093,
|
||||
@@ -730,9 +427,9 @@ def generate_worker_files(
|
||||
listeners += original_listeners
|
||||
|
||||
# The shared homeserver config. The contents of which will be inserted into the
|
||||
# base shared worker jinja2 template. This config file will be passed to all
|
||||
# workers, included Synapse's main process. It is intended mainly for disabling
|
||||
# functionality when certain workers are spun up, and adding a replication listener.
|
||||
# base shared worker jinja2 template.
|
||||
#
|
||||
# This config file will be passed to all workers, included Synapse's main process.
|
||||
shared_config: Dict[str, Any] = {"listeners": listeners}
|
||||
|
||||
# List of dicts that describe workers.
|
||||
@@ -740,20 +437,31 @@ def generate_worker_files(
|
||||
# program blocks.
|
||||
worker_descriptors: List[Dict[str, Any]] = []
|
||||
|
||||
# Upstreams for load-balancing purposes. This dict takes the form of the worker
|
||||
# type to the ports of each worker. For example:
|
||||
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
|
||||
# ports of each worker. For example:
|
||||
# {
|
||||
# worker_type: {1234, 1235, ...}}
|
||||
# }
|
||||
# and will be used to construct 'upstream' nginx directives.
|
||||
nginx_upstreams: Dict[str, Set[int]] = {}
|
||||
|
||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
|
||||
# will be placed after the proxy_pass directive. The main benefit to representing
|
||||
# this data as a dict over a str is that we can easily deduplicate endpoints
|
||||
# across multiple instances of the same worker. The final rendering will be combined
|
||||
# with nginx_upstreams and placed in /etc/nginx/conf.d.
|
||||
nginx_locations: Dict[str, str] = {}
|
||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
|
||||
# placed after the proxy_pass directive. The main benefit to representing this data as a
|
||||
# dict over a str is that we can easily deduplicate endpoints across multiple instances
|
||||
# of the same worker.
|
||||
#
|
||||
# An nginx site config that will be amended to depending on the workers that are
|
||||
# spun up. To be placed in /etc/nginx/conf.d.
|
||||
nginx_locations = {}
|
||||
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
|
||||
if not worker_types_env:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
else:
|
||||
# Split type names by comma, ignoring whitespace.
|
||||
worker_types = [x.strip() for x in worker_types_env.split(",")]
|
||||
|
||||
# Create the worker configuration directory if it doesn't already exist
|
||||
os.makedirs("/conf/workers", exist_ok=True)
|
||||
@@ -761,57 +469,66 @@ def generate_worker_files(
|
||||
# Start worker ports from this arbitrary port
|
||||
worker_port = 18009
|
||||
|
||||
# A counter of worker_type -> int. Used for determining the name for a given
|
||||
# worker type when generating its config file, as each worker's name is just
|
||||
# worker_type + instance #
|
||||
worker_type_counter: Dict[str, int] = {}
|
||||
|
||||
# A list of internal endpoints to healthcheck, starting with the main process
|
||||
# which exists even if no workers do.
|
||||
healthcheck_urls = ["http://localhost:8080/health"]
|
||||
|
||||
# Get the set of all worker types that we have configured
|
||||
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
|
||||
# Map locations to upstreams (corresponding to worker types) in Nginx
|
||||
# but only if we use the appropriate worker type
|
||||
for worker_type in all_worker_types_in_use:
|
||||
for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
|
||||
nginx_locations[endpoint_pattern] = f"http://{worker_type}"
|
||||
# For each worker type specified by the user, create config values
|
||||
for worker_type in worker_types:
|
||||
worker_config = WORKERS_CONFIG.get(worker_type)
|
||||
if worker_config:
|
||||
worker_config = worker_config.copy()
|
||||
else:
|
||||
error(worker_type + " is an unknown worker type! Please fix!")
|
||||
|
||||
# For each worker type specified by the user, create config values and write it's
|
||||
# yaml config file
|
||||
for worker_name, worker_types_set in requested_worker_types.items():
|
||||
# The collected and processed data will live here.
|
||||
worker_config: Dict[str, Any] = {}
|
||||
|
||||
# Merge all worker config templates for this worker into a single config
|
||||
for worker_type in worker_types_set:
|
||||
copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
|
||||
|
||||
# Merge worker type template configuration data. It's a combination of lists
|
||||
# and dicts, so use this helper.
|
||||
worker_config = merge_worker_template_configs(
|
||||
worker_config, copy_of_template_config
|
||||
)
|
||||
|
||||
# Replace placeholder names in the config template with the actual worker name.
|
||||
worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
|
||||
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
|
||||
worker_type_counter[worker_type] = new_worker_count
|
||||
|
||||
# Name workers by their type concatenated with an incrementing number
|
||||
# e.g. federation_reader1
|
||||
worker_name = worker_type + str(new_worker_count)
|
||||
worker_config.update(
|
||||
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
|
||||
)
|
||||
|
||||
# Update the shared config with any worker_type specific options. The first of a
|
||||
# given worker_type needs to stay assigned and not be replaced.
|
||||
worker_config["shared_extra_conf"].update(shared_config)
|
||||
shared_config = worker_config["shared_extra_conf"]
|
||||
# Update the shared config with any worker-type specific options
|
||||
shared_config.update(worker_config["shared_extra_conf"])
|
||||
|
||||
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
|
||||
|
||||
# Check if more than one instance of this worker type has been specified
|
||||
worker_type_total_count = worker_types.count(worker_type)
|
||||
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_worker_roles_to_shared_config(
|
||||
shared_config, worker_types_set, worker_name, worker_port
|
||||
shared_config, worker_type, worker_name, worker_port
|
||||
)
|
||||
|
||||
# Enable the worker in supervisord
|
||||
worker_descriptors.append(worker_config)
|
||||
|
||||
# Add nginx location blocks for this worker's endpoints (if any are defined)
|
||||
for pattern in worker_config["endpoint_patterns"]:
|
||||
# Determine whether we need to load-balance this worker
|
||||
if worker_type_total_count > 1:
|
||||
# Create or add to a load-balanced upstream for this worker
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
# Upstreams are named after the worker_type
|
||||
upstream = "http://" + worker_type
|
||||
else:
|
||||
upstream = "http://localhost:%d" % (worker_port,)
|
||||
|
||||
# Note that this endpoint should proxy to this upstream
|
||||
nginx_locations[pattern] = upstream
|
||||
|
||||
# Write out the worker's logging config file
|
||||
|
||||
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
|
||||
|
||||
# Then a worker config file
|
||||
@@ -822,10 +539,6 @@ def generate_worker_files(
|
||||
worker_log_config_filepath=log_config_filepath,
|
||||
)
|
||||
|
||||
# Save this worker's port number to the correct nginx upstreams
|
||||
for worker_type in worker_types_set:
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
worker_port += 1
|
||||
|
||||
# Build the nginx location config blocks
|
||||
@@ -838,14 +551,15 @@ def generate_worker_files(
|
||||
|
||||
# Determine the load-balancing upstreams to configure
|
||||
nginx_upstream_config = ""
|
||||
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
|
||||
|
||||
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
|
||||
body = ""
|
||||
for port in upstream_worker_ports:
|
||||
body += f" server localhost:{port};\n"
|
||||
body += " server localhost:%d;\n" % (port,)
|
||||
|
||||
# Add to the list of configured upstreams
|
||||
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
|
||||
upstream_worker_base_name=upstream_worker_base_name,
|
||||
upstream_worker_type=upstream_worker_type,
|
||||
body=body,
|
||||
)
|
||||
|
||||
@@ -866,7 +580,7 @@ def generate_worker_files(
|
||||
if reg_path.suffix.lower() in (".yaml", ".yml")
|
||||
]
|
||||
|
||||
workers_in_use = len(requested_worker_types) > 0
|
||||
workers_in_use = len(worker_types) > 0
|
||||
|
||||
# Shared homeserver config
|
||||
convert(
|
||||
@@ -964,26 +678,13 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
|
||||
generate_base_homeserver_config()
|
||||
else:
|
||||
log("Base homeserver config exists—not regenerating")
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of
|
||||
# file). Don't re-configure workers in this instance.
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of file).
|
||||
# Don't re-configure workers in this instance.
|
||||
mark_filepath = "/conf/workers_have_been_configured"
|
||||
if not os.path.exists(mark_filepath):
|
||||
# Collect and validate worker_type requests
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
|
||||
# Only process worker_types if they exist
|
||||
if not worker_types_env:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
requested_worker_types: Dict[str, Any] = {}
|
||||
else:
|
||||
# Split type names by comma, ignoring whitespace.
|
||||
worker_types = split_and_strip_string(worker_types_env, ",")
|
||||
requested_worker_types = parse_worker_types(worker_types)
|
||||
|
||||
# Always regenerate all other config files
|
||||
log("Generating worker config files")
|
||||
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
|
||||
generate_worker_files(environ, config_path, data_dir)
|
||||
|
||||
# Mark workers as being configured
|
||||
with open(mark_filepath, "w") as f:

@@ -26,8 +26,8 @@ for most users.
#### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
which can be used with the docker-compose file available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
on hub.docker.com.

@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
the format of `<row>` is defined by the individual streams. The
`<instance_name>` is the name of the Synapse process that generated the data
(usually "master"). We expect an RDATA for every row in the DB.
(usually "master").

Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
@@ -107,7 +107,7 @@ reconnect, following the steps above.
If the server sends messages faster than the client can consume them the
server will first buffer a (fairly large) number of commands and then
disconnect the client. This ensures that we don't queue up an unbounded
number of commands in memory and gives us a potential opportunity to
number of commands in memory and gives us a potential oppurtunity to
squawk loudly. When/if the client recovers it can reconnect to the
server and ask for missed messages.

@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
connection errors.

The client should keep track of the token in the last RDATA command
received for each stream so that on reconnection it can start streaming
received for each stream so that on reconneciton it can start streaming
from the correct place. Note: not all RDATA have valid tokens due to
batching. See `RdataCommand` for more details.
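A minimal sketch of that bookkeeping (assumed names, not Synapse's actual client code):

```python
from typing import Dict, Optional

# Remember the last valid token seen per stream so a reconnection can resume from it.
last_token_per_stream: Dict[str, str] = {}

def on_rdata(stream_name: str, token: Optional[str], row: object) -> None:
    # Batched RDATA rows may arrive without a valid token; only record real ones.
    if token is not None:
        last_token_per_stream[stream_name] = token
```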

@@ -188,8 +188,7 @@ client (C):
Two positions are included, the "new" position and the last position sent respectively.
This allows servers to tell instances that the positions have advanced but no
data has been written, without clients needlessly checking to see if they
have missed any updates. Instances will only fetch stuff if there is a gap between
their current position and the given last position.
have missed any updates.
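A minimal sketch of that gap check (hypothetical names; the real logic lives in Synapse's replication handler):

```python
def needs_catchup(current_position: int, last_sent_position: int) -> bool:
    # Only fetch missed rows when our own position lags behind the position the
    # server says it had already sent data up to.
    return current_position < last_sent_position
```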

#### ERROR (S, C)


@@ -88,34 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.81.0

## Application service path & authentication deprecations

Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.

Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.
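For application service implementers, a rough sketch (a hypothetical helper, not from the Synapse codebase or the spec) of preferring the header over the deprecated query parameter:

```python
from typing import Mapping, Optional

def extract_homeserver_token(
    headers: Mapping[str, str], query_params: Mapping[str, str]
) -> Optional[str]:
    auth = headers.get("Authorization", "")
    if auth.startswith("Bearer "):
        return auth[len("Bearer "):]
    # Deprecated fallback: the ?access_token= query parameter will eventually go away.
    return query_params.get("access_token")
```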

# Upgrading to v1.80.0

## Reporting events error code change

Before this update, the
[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
endpoint would return a `403` if a user attempted to report an event that they did not have access to.
This endpoint will now return a `404` in this case instead.

Clients that implement event reporting should check that their error handling code will handle this
change.
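A rough sketch (a hypothetical client-side helper) of handling that accommodates both the old and the new status code:

```python
def describe_report_result(status_code: int) -> str:
    if status_code == 200:
        return "event reported"
    if status_code in (403, 404):
        # Servers before this change answered 403; newer Synapse answers 404 when the
        # reporting user cannot access the event.
        return "event not accessible to the reporting user"
    return "unexpected error"
```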

# Upgrading to v1.79.0

## The `on_threepid_bind` module callback method has been deprecated
@@ -199,17 +171,6 @@ Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.


## User directory rebuild

Synapse 1.74 queues a background update
[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643),
in order to fix missing or erroneous entries.

When this update begins, the user directory will be cleared out and rebuilt from
scratch. User directory lookups will be incomplete until the rebuild completes.
Admins can monitor the rebuild's progress by using the
[Background update Admin API](usage/administration/admin_api/background_updates.md#status).

# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

@@ -1521,7 +1521,7 @@ This option specifies several limits for login:
address. Defaults to `per_second: 0.003`, `burst_count: 5`.

* `account` ratelimits login requests based on the account the
client is attempting to log into. Defaults to `per_second: 0.003`,
client is attempting to log into. Defaults to `per_second: 0.03`,
`burst_count: 5`.

* `failed_attempts` ratelimits login requests based on the account the
@@ -3100,11 +3100,6 @@ Options for each entry include:
match a pre-existing account instead of failing. This could be used if
switching from password logins to OIDC. Defaults to false.

* `enable_registration`: set to 'false' to disable automatic registration of new
users. This allows the OIDC SSO flow to be limited to sign in only, rather than
automatically registering users that have a valid SSO login but do not have
a pre-registered account. Defaults to true.

* `user_mapping_provider`: Configuration for how attributes returned from a OIDC
provider are mapped onto a matrix user. This setting has the following
sub-properties:
@@ -3221,7 +3216,6 @@ oidc_providers:
userinfo_endpoint: "https://accounts.example.com/userinfo"
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
skip_verification: true
enable_registration: true
user_mapping_provider:
config:
subject_claim: "id"

@@ -245,9 +245,7 @@ information.
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
^/_matrix/client/(r0|v3|unstable)/register$
^/_matrix/client/(r0|v3|unstable)/register/available$
^/_matrix/client/v1/register/m.login.registration_token/validity$
^/_matrix/client/(r0|v3|unstable)/password_policy$

# Event sending requests
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact

8
mypy.ini
@@ -48,6 +48,9 @@ warn_unused_ignores = False
|
||||
[mypy-synapse.util.caches.treecache]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.storage.database]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-tests.util.caches.test_descriptors]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
@@ -71,6 +74,11 @@ ignore_missing_imports = True
|
||||
[mypy-msgpack]
|
||||
ignore_missing_imports = True
|
||||
|
||||
# Note: WIP stubs available at
|
||||
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
|
||||
[mypy-netaddr]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-parameterized.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
|
||||
1998
poetry.lock
generated
File diff suppressed because it is too large
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.81.0rc1"
|
||||
version = "1.79.0rc1"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "Apache-2.0"
|
||||
@@ -153,13 +153,15 @@ python = "^3.7.1"
|
||||
# ----------------------
|
||||
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
|
||||
jsonschema = ">=3.0.0"
|
||||
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
|
||||
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
|
||||
immutabledict = ">=2.0"
|
||||
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
|
||||
# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
|
||||
# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
|
||||
frozendict = ">=1,!=2.1.2,<2.3.5"
|
||||
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
|
||||
unpaddedbase64 = ">=2.1.0"
|
||||
# We require 2.0.0 for immutabledict support.
|
||||
canonicaljson = "^2.0.0"
|
||||
# We require 1.5.0 to work around an issue when running against the C implementation of
|
||||
# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
|
||||
canonicaljson = "^1.5.0"
|
||||
# we use the type definitions added in signedjson 1.1.
|
||||
signedjson = "^1.1.0"
|
||||
# validating SSL certs for IP addresses requires service_identity 18.1.
|
||||
@@ -311,7 +313,7 @@ all = [
|
||||
# We pin black so that our tests don't start failing on new releases.
|
||||
isort = ">=5.10.1"
|
||||
black = ">=22.3.0"
|
||||
ruff = "0.0.259"
|
||||
ruff = "0.0.252"
|
||||
|
||||
# Typechecking
|
||||
mypy = "*"
|
||||
@@ -319,7 +321,6 @@ mypy-zope = "*"
|
||||
types-bleach = ">=4.1.0"
|
||||
types-commonmark = ">=0.9.2"
|
||||
types-jsonschema = ">=3.2.0"
|
||||
types-netaddr = ">=0.8.0.6"
|
||||
types-opentracing = ">=2.4.2"
|
||||
types-Pillow = ">=8.3.4"
|
||||
types-psycopg2 = ">=2.9.9"
|
||||
@@ -350,18 +351,6 @@ towncrier = ">=18.6.0rc1"
|
||||
# Used for checking the Poetry lockfile
|
||||
tomli = ">=1.2.3"
|
||||
|
||||
|
||||
# Dependencies for building the development documentation
|
||||
[tool.poetry.group.dev-docs]
|
||||
optional = true
|
||||
|
||||
[tool.poetry.group.dev-docs.dependencies]
|
||||
sphinx = {version = "^6.1", python = "^3.8"}
|
||||
sphinx-autodoc2 = {version = "^0.4.2", python = "^3.8"}
|
||||
myst-parser = {version = "^1.0.0", python = "^3.8"}
|
||||
furo = ">=2022.12.7,<2024.0.0"
|
||||
|
||||
|
||||
[build-system]
|
||||
# The upper bounds here are defensive, intended to prevent situations like
|
||||
# #13849 and #14079 where we see buildtime or runtime errors caused by build
|
||||
|
||||
@@ -28,7 +28,6 @@ DISTS = (
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
|
||||
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
|
||||
"ubuntu:lunar", # 23.04 (EOL 2024-01)
|
||||
)
|
||||
|
||||
DESC = """\
|
||||
|
||||
@@ -91,7 +91,6 @@ else
|
||||
"synapse" "docker" "tests"
|
||||
"scripts-dev"
|
||||
"contrib" "synmark" "stubs" ".ci"
|
||||
"dev-docs"
|
||||
)
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -280,7 +280,7 @@ def _prepare() -> None:
|
||||
)
|
||||
|
||||
print("Opening the changelog in your browser...")
|
||||
print("Please ask #synapse-dev to give it a check.")
|
||||
print("Please ask others to give it a check.")
|
||||
click.launch(
|
||||
f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
|
||||
)
|
||||
|
||||
39
stubs/frozendict.pyi
Normal file
@@ -0,0 +1,39 @@
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Stub for frozendict.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Hashable, Iterable, Iterator, Mapping, Tuple, TypeVar, overload
|
||||
|
||||
_KT = TypeVar("_KT", bound=Hashable) # Key type.
|
||||
_VT = TypeVar("_VT") # Value type.
|
||||
|
||||
class frozendict(Mapping[_KT, _VT]):
|
||||
@overload
|
||||
def __init__(self, **kwargs: _VT) -> None: ...
|
||||
@overload
|
||||
def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
|
||||
@overload
|
||||
def __init__(
|
||||
self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
|
||||
) -> None: ...
|
||||
def __getitem__(self, key: _KT) -> _VT: ...
|
||||
def __contains__(self, key: Any) -> bool: ...
|
||||
def copy(self, **add_or_replace: Any) -> frozendict: ...
|
||||
def __iter__(self) -> Iterator[_KT]: ...
|
||||
def __len__(self) -> int: ...
|
||||
def __repr__(self) -> str: ...
|
||||
def __hash__(self) -> int: ...
|
||||
@@ -17,9 +17,9 @@
|
||||
""" This is an implementation of a Matrix homeserver.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from typing import Any, Dict
|
||||
|
||||
from synapse.util.rust import check_rust_lib_up_to_date
|
||||
from synapse.util.stringutils import strtobool
|
||||
@@ -61,20 +61,11 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Teach canonicaljson how to serialise immutabledicts.
|
||||
# Use the standard library json implementation instead of simplejson.
|
||||
try:
|
||||
from canonicaljson import register_preserialisation_callback
|
||||
from immutabledict import immutabledict
|
||||
from canonicaljson import set_json_library
|
||||
|
||||
def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]:
|
||||
try:
|
||||
return d._dict
|
||||
except Exception:
|
||||
# Paranoia: fall back to a `dict()` call, in case a future version of
|
||||
# immutabledict removes `_dict` from the implementation.
|
||||
return dict(d)
|
||||
|
||||
register_preserialisation_callback(immutabledict, _immutabledict_cb)
|
||||
set_json_library(json)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
@@ -1,302 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2022-2023 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Iterable, Optional, Pattern, Set, Tuple
|
||||
|
||||
import yaml
|
||||
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.federation.transport.server import (
|
||||
TransportLayerServer,
|
||||
register_servlets as register_federation_servlets,
|
||||
)
|
||||
from synapse.http.server import HttpServer, ServletCallback
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.rest.key.v2 import RemoteKey
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage import DataStore
|
||||
|
||||
logger = logging.getLogger("generate_workers_map")
|
||||
|
||||
|
||||
class MockHomeserver(HomeServer):
|
||||
DATASTORE_CLASS = DataStore # type: ignore
|
||||
|
||||
def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
|
||||
super().__init__(config.server.server_name, config=config)
|
||||
self.config.worker.worker_app = worker_app
|
||||
|
||||
|
||||
GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)")
|
||||
|
||||
|
||||
@dataclass
|
||||
class EndpointDescription:
|
||||
"""
|
||||
Describes an endpoint and how it should be routed.
|
||||
"""
|
||||
|
||||
# The servlet class that handles this endpoint
|
||||
servlet_class: object
|
||||
|
||||
# The category of this endpoint. Is read from the `CATEGORY` constant in the servlet
|
||||
# class.
|
||||
category: Optional[str]
|
||||
|
||||
# TODO:
|
||||
# - does it need to be routed based on a stream writer config?
|
||||
# - does it benefit from any optimised, but optional, routing?
|
||||
# - what 'opinionated synapse worker class' (event_creator, synchrotron, etc) does
|
||||
# it go in?
|
||||
|
||||
|
||||
class EnumerationResource(HttpServer):
|
||||
"""
|
||||
Accepts servlet registrations for the purposes of building up a description of
|
||||
all endpoints.
|
||||
"""
|
||||
|
||||
def __init__(self, is_worker: bool) -> None:
|
||||
self.registrations: Dict[Tuple[str, str], EndpointDescription] = {}
|
||||
self._is_worker = is_worker
|
||||
|
||||
def register_paths(
|
||||
self,
|
||||
method: str,
|
||||
path_patterns: Iterable[Pattern],
|
||||
callback: ServletCallback,
|
||||
servlet_classname: str,
|
||||
) -> None:
|
||||
# federation servlet callbacks are wrapped, so unwrap them.
|
||||
callback = getattr(callback, "__wrapped__", callback)
|
||||
|
||||
# fish out the servlet class
|
||||
servlet_class = callback.__self__.__class__ # type: ignore
|
||||
|
||||
if self._is_worker and method in getattr(
|
||||
servlet_class, "WORKERS_DENIED_METHODS", ()
|
||||
):
|
||||
# This endpoint would cause an error if called on a worker, so pretend it
|
||||
# was never registered!
|
||||
return
|
||||
|
||||
sd = EndpointDescription(
|
||||
servlet_class=servlet_class,
|
||||
category=getattr(servlet_class, "CATEGORY", None),
|
||||
)
|
||||
|
||||
for pat in path_patterns:
|
||||
self.registrations[(method, pat.pattern)] = sd
|
||||
|
||||
|
||||
def get_registered_paths_for_hs(
|
||||
hs: HomeServer,
|
||||
) -> Dict[Tuple[str, str], EndpointDescription]:
|
||||
"""
|
||||
Given a homeserver, get all registered endpoints and their descriptions.
|
||||
"""
|
||||
|
||||
enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None)
|
||||
ClientRestResource.register_servlets(enumerator, hs)
|
||||
federation_server = TransportLayerServer(hs)
|
||||
|
||||
# we can't use `federation_server.register_servlets` but this line does the
|
||||
# same thing, only it uses this enumerator
|
||||
register_federation_servlets(
|
||||
federation_server.hs,
|
||||
resource=enumerator,
|
||||
ratelimiter=federation_server.ratelimiter,
|
||||
authenticator=federation_server.authenticator,
|
||||
servlet_groups=federation_server.servlet_groups,
|
||||
)
|
||||
|
||||
# the key server endpoints are separate again
|
||||
RemoteKey(hs).register(enumerator)
|
||||
|
||||
return enumerator.registrations
|
||||
|
||||
|
||||
def get_registered_paths_for_default(
|
||||
worker_app: Optional[str], base_config: HomeServerConfig
|
||||
) -> Dict[Tuple[str, str], EndpointDescription]:
|
||||
"""
|
||||
Given the name of a worker application and a base homeserver configuration,
|
||||
returns:
|
||||
|
||||
Dict from (method, path) to EndpointDescription
|
||||
|
||||
TODO Don't require passing in a config
|
||||
"""
|
||||
|
||||
hs = MockHomeserver(base_config, worker_app)
|
||||
# TODO We only do this to avoid an error, but don't need the database etc
|
||||
hs.setup()
|
||||
return get_registered_paths_for_hs(hs)
|
||||
|
||||
|
||||
def elide_http_methods_if_unconflicting(
|
||||
registrations: Dict[Tuple[str, str], EndpointDescription],
|
||||
all_possible_registrations: Dict[Tuple[str, str], EndpointDescription],
|
||||
) -> Dict[Tuple[str, str], EndpointDescription]:
|
||||
"""
|
||||
Elides HTTP methods (by replacing them with `*`) if all possible registered methods
|
||||
can be handled by the worker whose registration map is `registrations`.
|
||||
|
||||
i.e. the only endpoints left with methods (other than `*`) should be the ones where
|
||||
the worker can't handle all possible methods for that path.
|
||||
"""
|
||||
|
||||
def paths_to_methods_dict(
|
||||
methods_and_paths: Iterable[Tuple[str, str]]
|
||||
) -> Dict[str, Set[str]]:
|
||||
"""
|
||||
Given (method, path) pairs, produces a dict from path to set of methods
|
||||
available at that path.
|
||||
"""
|
||||
result: Dict[str, Set[str]] = {}
|
||||
for method, path in methods_and_paths:
|
||||
result.setdefault(path, set()).add(method)
|
||||
return result
|
||||
|
||||
all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
|
||||
reg_methods = paths_to_methods_dict(registrations)
|
||||
|
||||
output = {}
|
||||
|
||||
for path, handleable_methods in reg_methods.items():
|
||||
if handleable_methods == all_possible_reg_methods[path]:
|
||||
any_method = next(iter(handleable_methods))
|
||||
# TODO This assumes that all methods have the same servlet.
|
||||
# I suppose that's possibly dubious?
|
||||
output[("*", path)] = registrations[(any_method, path)]
|
||||
else:
|
||||
for method in handleable_methods:
|
||||
output[(method, path)] = registrations[(method, path)]
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def simplify_path_regexes(
|
||||
registrations: Dict[Tuple[str, str], EndpointDescription]
|
||||
) -> Dict[Tuple[str, str], EndpointDescription]:
|
||||
"""
|
||||
Simplify all the path regexes for the dict of endpoint descriptions,
|
||||
so that we don't use the Python-specific regex extensions
|
||||
(and also to remove needlessly specific detail).
|
||||
"""
|
||||
|
||||
def simplify_path_regex(path: str) -> str:
|
||||
"""
|
||||
Given a regex pattern, replaces all named capturing groups (e.g. `(?P<blah>xyz)`)
|
||||
with a simpler version available in more common regex dialects (e.g. `.*`).
|
||||
"""
|
||||
|
||||
# TODO it's hard to choose between these two;
|
||||
# `.*` is a vague simplification
|
||||
# return GROUP_PATTERN.sub(r"\1", path)
|
||||
return GROUP_PATTERN.sub(r".*", path)
|
||||
|
||||
return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser(
|
||||
description=(
|
||||
"Updates a synapse database to the latest schema and optionally runs background updates"
|
||||
" on it."
|
||||
)
|
||||
)
|
||||
parser.add_argument("-v", action="store_true")
|
||||
parser.add_argument(
|
||||
"--config-path",
|
||||
type=argparse.FileType("r"),
|
||||
required=True,
|
||||
help="Synapse configuration file",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# TODO
|
||||
# logging.basicConfig(**logging_config)
|
||||
|
||||
# Load, process and sanity-check the config.
|
||||
hs_config = yaml.safe_load(args.config_path)
|
||||
|
||||
config = HomeServerConfig()
|
||||
config.parse_config_dict(hs_config, "", "")
|
||||
|
||||
master_paths = get_registered_paths_for_default(None, config)
|
||||
worker_paths = get_registered_paths_for_default(
|
||||
"synapse.app.generic_worker", config
|
||||
)
|
||||
|
||||
all_paths = {**master_paths, **worker_paths}
|
||||
|
||||
elided_worker_paths = elide_http_methods_if_unconflicting(worker_paths, all_paths)
|
||||
elide_http_methods_if_unconflicting(master_paths, all_paths)
|
||||
|
||||
# TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT
|
||||
|
||||
categories_to_methods_and_paths: Dict[
|
||||
Optional[str], Dict[Tuple[str, str], EndpointDescription]
|
||||
] = defaultdict(dict)
|
||||
|
||||
for (method, path), desc in elided_worker_paths.items():
|
||||
categories_to_methods_and_paths[desc.category][method, path] = desc
|
||||
|
||||
for category, contents in categories_to_methods_and_paths.items():
|
||||
print_category(category, contents)
|
||||
|
||||
|
||||
def print_category(
|
||||
category_name: Optional[str],
|
||||
elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
|
||||
) -> None:
|
||||
"""
|
||||
Prints out a category, in documentation page style.
|
||||
|
||||
Example:
|
||||
```
|
||||
# Category name
|
||||
/path/xyz
|
||||
|
||||
GET /path/abc
|
||||
```
|
||||
"""
|
||||
|
||||
if category_name:
|
||||
print(f"# {category_name}")
|
||||
else:
|
||||
print("# (Uncategorised requests)")
|
||||
|
||||
for ln in sorted(
|
||||
p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
|
||||
):
|
||||
print(ln)
|
||||
print()
|
||||
for ln in sorted(
|
||||
f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
|
||||
):
|
||||
print(ln)
|
||||
print()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -18,7 +18,6 @@
|
||||
import argparse
|
||||
import curses
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
@@ -59,7 +58,6 @@ from synapse.storage.databases.main.account_data import AccountDataWorkerStore
|
||||
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
|
||||
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
|
||||
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
|
||||
from synapse.storage.databases.main.events_bg_updates import (
|
||||
@@ -69,10 +67,7 @@ from synapse.storage.databases.main.media_repository import (
|
||||
MediaRepositoryBackgroundUpdateStore,
|
||||
)
|
||||
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.pusher import (
|
||||
PusherBackgroundUpdatesStore,
|
||||
PusherWorkerStore,
|
||||
)
|
||||
from synapse.storage.databases.main.pusher import PusherWorkerStore
|
||||
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.registration import (
|
||||
RegistrationBackgroundUpdateStore,
|
||||
@@ -226,12 +221,10 @@ class Store(
|
||||
MainStateBackgroundUpdateStore,
|
||||
UserDirectoryBackgroundUpdateStore,
|
||||
EndToEndKeyBackgroundStore,
|
||||
EndToEndRoomKeyBackgroundStore,
|
||||
StatsStore,
|
||||
AccountDataWorkerStore,
|
||||
PushRuleStore,
|
||||
PusherWorkerStore,
|
||||
PusherBackgroundUpdatesStore,
|
||||
PresenceBackgroundUpdateStore,
|
||||
ReceiptsBackgroundUpdateStore,
|
||||
RelationsWorkerStore,
|
||||
@@ -1333,17 +1326,10 @@ def main() -> None:
|
||||
filename="port-synapse.log" if args.curses else None,
|
||||
)
|
||||
|
||||
if not os.path.isfile(args.sqlite_database):
|
||||
sys.stderr.write(
|
||||
"The sqlite database you specified does not exist, please check that you have the"
|
||||
"correct path."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
sqlite_config = {
|
||||
"name": "sqlite3",
|
||||
"args": {
|
||||
"database": args.sqlite_database,
|
||||
"database": "file:{}?mode=rw".format(args.sqlite_database),
|
||||
"cp_min": 1,
|
||||
"cp_max": 1,
|
||||
"check_same_thread": False,
|
||||
|
||||
@@ -27,7 +27,7 @@ from synapse.util import json_decoder
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.types import JsonDict, StrCollection
|
||||
from synapse.types import JsonDict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -108,11 +108,6 @@ class Codes(str, Enum):
|
||||
|
||||
USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
|
||||
|
||||
AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET"
|
||||
AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS"
|
||||
AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT"
|
||||
AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED"
|
||||
|
||||
# Attempt to send a second annotation with the same event type & annotation key
|
||||
# MSC2677
|
||||
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
|
||||
@@ -682,27 +677,18 @@ class FederationPullAttemptBackoffError(RuntimeError):
|
||||
Attributes:
|
||||
event_id: The event_id which we are refusing to pull
|
||||
message: A custom error message that gives more context
|
||||
retry_after_ms: The remaining backoff interval, in milliseconds
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int
|
||||
):
|
||||
event_ids = list(event_ids)
|
||||
def __init__(self, event_ids: List[str], message: Optional[str]):
|
||||
self.event_ids = event_ids
|
||||
|
||||
if message:
|
||||
error_message = message
|
||||
else:
|
||||
error_message = (
|
||||
f"Not attempting to pull event_ids={event_ids} because we already "
|
||||
"tried to pull them recently (backing off)."
|
||||
)
|
||||
error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
|
||||
|
||||
super().__init__(error_message)
|
||||
|
||||
self.event_ids = event_ids
|
||||
self.retry_after_ms = retry_after_ms
|
||||
|
||||
|
||||
class HttpResponseException(CodeMessageException):
|
||||
"""
|
||||
|
||||
@@ -41,12 +41,7 @@ from typing_extensions import ParamSpec
|
||||
|
||||
import twisted
|
||||
from twisted.internet import defer, error, reactor as _reactor
|
||||
from twisted.internet.interfaces import (
|
||||
IOpenSSLContextFactory,
|
||||
IReactorSSL,
|
||||
IReactorTCP,
|
||||
IReactorUNIX,
|
||||
)
|
||||
from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorSSL, IReactorTCP
|
||||
from twisted.internet.protocol import ServerFactory
|
||||
from twisted.internet.tcp import Port
|
||||
from twisted.logger import LoggingFile, LogLevel
|
||||
@@ -61,11 +56,8 @@ from synapse.app.phone_stats_home import start_phone_stats_home
|
||||
from synapse.config import ConfigError
|
||||
from synapse.config._base import format_config_error
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
|
||||
from synapse.config.server import ListenerConfig, ManholeConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.events.presence_router import load_legacy_presence_router
|
||||
from synapse.events.spamcheck import load_legacy_spam_checkers
|
||||
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
|
||||
from synapse.handlers.auth import load_legacy_password_auth_providers
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.logging.context import PreserveLoggingContext
|
||||
@@ -73,6 +65,15 @@ from synapse.logging.opentracing import init_tracer
|
||||
from synapse.metrics import install_gc_manager, register_threadpool
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.metrics.jemalloc import setup_jemalloc_stats
|
||||
from synapse.module_api.callbacks.presence_router_callbacks import (
|
||||
load_legacy_presence_router,
|
||||
)
|
||||
from synapse.module_api.callbacks.spam_checker_callbacks import (
|
||||
load_legacy_spam_checkers,
|
||||
)
|
||||
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
|
||||
load_legacy_third_party_event_rules,
|
||||
)
|
||||
from synapse.types import ISynapseReactor
|
||||
from synapse.util import SYNAPSE_VERSION
|
||||
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
|
||||
@@ -356,28 +357,6 @@ def listen_tcp(
|
||||
return r # type: ignore[return-value]
|
||||
|
||||
|
||||
def listen_unix(
|
||||
path: str,
|
||||
mode: int,
|
||||
factory: ServerFactory,
|
||||
reactor: IReactorUNIX = reactor,
|
||||
backlog: int = 50,
|
||||
) -> List[Port]:
|
||||
"""
|
||||
Create a UNIX socket for a given path and 'mode' permission
|
||||
|
||||
Returns:
|
||||
list of twisted.internet.tcp.Port listening for TCP connections
|
||||
"""
|
||||
wantPID = True
|
||||
|
||||
return [
|
||||
# IReactorUNIX returns an object implementing IListeningPort from listenUNIX,
|
||||
# but we know it will be a Port instance.
|
||||
cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID))
|
||||
]
|
||||
|
||||
|
||||
def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
@@ -386,13 +365,18 @@ def listen_http(
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls

    assert listener_config.http_options is not None

    site_tag = listener_config.get_site_tag()
    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    site = SynapseSite(
        "synapse.access.%s.%s"
        % ("https" if listener_config.is_tls() else "http", site_tag),
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
@@ -400,41 +384,25 @@ def listen_http(
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )

    if isinstance(listener_config, TCPListenerConfig):
        if listener_config.is_tls():
            # refresh_certificate should have been called before this.
            assert context_factory is not None
            ports = listen_ssl(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                context_factory,
                reactor=reactor,
            )
            logger.info(
                "Synapse now listening on TCP port %d (TLS)", listener_config.port
            )
        else:
            ports = listen_tcp(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                reactor=reactor,
            )
            logger.info("Synapse now listening on TCP port %d", listener_config.port)

    if tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            context_factory,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_unix(
            listener_config.path, listener_config.mode, site, reactor=reactor
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=reactor,
        )
        # getHost() returns a UNIXAddress which contains an instance variable of 'name'
        # encoded as a byte string. Decode as utf-8 so pretty.
        logger.info(
            "Synapse now listening on Unix Socket at: "
            f"{ports[0].getHost().name.decode('utf-8')}"
        )

        logger.info("Synapse now listening on TCP port %d", port)
    return ports

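The hunk above swaps class-based dispatch (`isinstance(listener_config, TCPListenerConfig)`) back to a plain `tls` flag. A minimal sketch of the class-based pattern, using simplified stand-in types rather than Synapse's real config classes:

```python
# Illustrative sketch of dispatching on the concrete listener class instead of a flag.
from dataclasses import dataclass
from typing import List, Union


@dataclass
class TCPListener:
    port: int
    bind_addresses: List[str]
    tls: bool = False


@dataclass
class UnixListener:
    path: str
    mode: int = 0o666


Listener = Union[TCPListener, UnixListener]


def describe_listener(listener: Listener) -> str:
    if isinstance(listener, TCPListener):
        scheme = "https" if listener.tls else "http"
        return f"{scheme} on port {listener.port} ({', '.join(listener.bind_addresses)})"
    # Unix sockets cannot carry TLS themselves; a reverse proxy terminates it.
    return f"http over unix socket at {listener.path} (mode {oct(listener.mode)})"


print(describe_listener(TCPListener(8008, ["::", "0.0.0.0"])))
print(describe_listener(UnixListener("/run/synapse/client.sock")))
```

Keeping TCP-only fields (port, bind addresses, tls) and Unix-only fields (path, mode) on separate classes lets the type checker, rather than runtime asserts, guarantee that each branch only touches fields that exist.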
||||
@@ -38,7 +38,7 @@ from synapse.app._base import (
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.server import ListenerConfig, TCPListenerConfig
|
||||
from synapse.config.server import ListenerConfig
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.http.server import JsonResource, OptionsResource
|
||||
from synapse.logging.context import LoggingContext
|
||||
@@ -236,18 +236,12 @@ class GenericWorkerServer(HomeServer):
|
||||
if listener.type == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener.type == "manhole":
|
||||
if isinstance(listener, TCPListenerConfig):
|
||||
_base.listen_manhole(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
manhole_settings=self.config.server.manhole_settings,
|
||||
manhole_globals={"hs": self},
|
||||
)
|
||||
else:
|
||||
raise ConfigError(
|
||||
"Can not using a unix socket for manhole at this time."
|
||||
)
|
||||
|
||||
_base.listen_manhole(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
manhole_settings=self.config.server.manhole_settings,
|
||||
manhole_globals={"hs": self},
|
||||
)
|
||||
elif listener.type == "metrics":
|
||||
if not self.config.metrics.enable_metrics:
|
||||
logger.warning(
|
||||
@@ -255,16 +249,10 @@ class GenericWorkerServer(HomeServer):
|
||||
"enable_metrics is not True!"
|
||||
)
|
||||
else:
|
||||
if isinstance(listener, TCPListenerConfig):
|
||||
_base.listen_metrics(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
)
|
||||
else:
|
||||
raise ConfigError(
|
||||
"Can not use a unix socket for metrics at this time."
|
||||
)
|
||||
|
||||
_base.listen_metrics(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
)
|
||||
else:
|
||||
logger.warning("Unsupported listener type: %s", listener.type)
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ from synapse.app._base import (
|
||||
)
|
||||
from synapse.config._base import ConfigError, format_config_error
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.server import ListenerConfig, TCPListenerConfig
|
||||
from synapse.config.server import ListenerConfig
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.http.additional_resource import AdditionalResource
|
||||
from synapse.http.server import (
|
||||
@@ -78,13 +78,14 @@ class SynapseHomeServer(HomeServer):
|
||||
DATASTORE_CLASS = DataStore # type: ignore
|
||||
|
||||
def _listener_http(
|
||||
self,
|
||||
config: HomeServerConfig,
|
||||
listener_config: ListenerConfig,
|
||||
self, config: HomeServerConfig, listener_config: ListenerConfig
|
||||
) -> Iterable[Port]:
|
||||
port = listener_config.port
|
||||
# Must exist since this is an HTTP listener.
|
||||
assert listener_config.http_options is not None
|
||||
site_tag = listener_config.get_site_tag()
|
||||
site_tag = listener_config.http_options.tag
|
||||
if site_tag is None:
|
||||
site_tag = str(port)
|
||||
|
||||
# We always include a health resource.
|
||||
resources: Dict[str, Resource] = {"/health": HealthResource()}
|
||||
@@ -251,17 +252,12 @@ class SynapseHomeServer(HomeServer):
|
||||
self._listener_http(self.config, listener)
|
||||
)
|
||||
elif listener.type == "manhole":
|
||||
if isinstance(listener, TCPListenerConfig):
|
||||
_base.listen_manhole(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
manhole_settings=self.config.server.manhole_settings,
|
||||
manhole_globals={"hs": self},
|
||||
)
|
||||
else:
|
||||
raise ConfigError(
|
||||
"Can not use a unix socket for manhole at this time."
|
||||
)
|
||||
_base.listen_manhole(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
manhole_settings=self.config.server.manhole_settings,
|
||||
manhole_globals={"hs": self},
|
||||
)
|
||||
elif listener.type == "metrics":
|
||||
if not self.config.metrics.enable_metrics:
|
||||
logger.warning(
|
||||
@@ -269,16 +265,10 @@ class SynapseHomeServer(HomeServer):
|
||||
"enable_metrics is not True!"
|
||||
)
|
||||
else:
|
||||
if isinstance(listener, TCPListenerConfig):
|
||||
_base.listen_metrics(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
)
|
||||
else:
|
||||
raise ConfigError(
|
||||
"Can not use a unix socket for metrics at this time."
|
||||
)
|
||||
|
||||
_base.listen_metrics(
|
||||
listener.bind_addresses,
|
||||
listener.port,
|
||||
)
|
||||
else:
|
||||
# this shouldn't happen, as the listener type should have been checked
|
||||
# during parsing
|
||||
|
||||
@@ -17,8 +17,6 @@ import urllib.parse
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
@@ -26,14 +24,13 @@ from typing import (
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
from prometheus_client import Counter
|
||||
from typing_extensions import Concatenate, ParamSpec, TypeGuard
|
||||
from typing_extensions import TypeGuard
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
|
||||
from synapse.api.errors import CodeMessageException, HttpResponseException
|
||||
from synapse.api.errors import CodeMessageException
|
||||
from synapse.appservice import (
|
||||
ApplicationService,
|
||||
TransactionOneTimeKeysCount,
|
||||
@@ -41,7 +38,7 @@ from synapse.appservice import (
|
||||
)
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.utils import SerializeEventConfig, serialize_event
|
||||
from synapse.http.client import SimpleHttpClient, is_unknown_endpoint
|
||||
from synapse.http.client import SimpleHttpClient
|
||||
from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
|
||||
@@ -81,11 +78,7 @@ sent_todevice_counter = Counter(
HOUR_IN_MS = 60 * 60 * 1000


APP_SERVICE_PREFIX = "/_matrix/app/v1"
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"

P = ParamSpec("P")
R = TypeVar("R")
APP_SERVICE_PREFIX = "/_matrix/app/unstable"


def _is_valid_3pe_metadata(info: JsonDict) -> bool:
||||
@@ -128,47 +121,6 @@ class ApplicationServiceApi(SimpleHttpClient):
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )

    async def _send_with_fallbacks(
        self,
        service: "ApplicationService",
        prefixes: List[str],
        path: str,
        func: Callable[Concatenate[str, P], Awaitable[R]],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> R:
        """
        Attempt to call an application service with multiple paths, falling back
        until one succeeds.

        Args:
            service: The application service, which provides the base URL.
            prefixes: A list of paths to try in order for the requests.
            path: A suffix to append to each prefix.
            func: The function to call, the first argument will be the full
                endpoint to fetch. Other arguments are provided by args/kwargs.

        Returns:
            The return value of func.
        """
        for i, prefix in enumerate(prefixes, start=1):
            uri = f"{service.url}{prefix}{path}"
            try:
                return await func(uri, *args, **kwargs)
            except HttpResponseException as e:
                # If an error is received that is due to an unrecognised path,
                # fall back to the next path (if one exists). Otherwise, consider it
                # a legitimate error and raise.
                if i < len(prefixes) and is_unknown_endpoint(e):
                    continue
                raise
            except Exception:
                # Unexpected exceptions get sent to the caller.
                raise

        # The function should always exit via the return or raise above this.
        raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")

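The removed `_send_with_fallbacks` tried each URL prefix in turn and only fell through to the next one on an "unknown endpoint" style error. A self-contained sketch of that pattern (the `fetch` stub, the exception class, and the URLs are made up for illustration; the real code uses `SimpleHttpClient` and `is_unknown_endpoint`):

```python
# Sketch of the "try each prefix until one is recognised" fallback pattern.
import asyncio
from typing import Awaitable, Callable, List, TypeVar

T = TypeVar("T")


class UnknownEndpoint(Exception):
    """Stand-in for an HTTP 404/405 "unrecognised request" response."""


async def send_with_fallbacks(
    base_url: str,
    prefixes: List[str],
    path: str,
    func: Callable[[str], Awaitable[T]],
) -> T:
    for i, prefix in enumerate(prefixes, start=1):
        uri = f"{base_url}{prefix}{path}"
        try:
            return await func(uri)
        except UnknownEndpoint:
            # Unrecognised path: fall back to the next prefix, if any remain.
            if i < len(prefixes):
                continue
            raise
    raise RuntimeError("no prefixes supplied")


async def fetch(uri: str) -> str:
    # Pretend only the legacy (unprefixed) path exists on this appservice.
    if "/_matrix/app/v1" in uri:
        raise UnknownEndpoint(uri)
    return f"200 OK from {uri}"


print(asyncio.run(send_with_fallbacks(
    "https://appservice.example", ["/_matrix/app/v1", ""], "/users/%40alice", fetch
)))
```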
||||
async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
|
||||
if service.url is None:
|
||||
return False
|
||||
@@ -176,12 +128,10 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
|
||||
try:
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/users/{urllib.parse.quote(user_id)}",
|
||||
self.get_json,
|
||||
response = await self.get_json(
|
||||
uri,
|
||||
{"access_token": service.hs_token},
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
@@ -190,9 +140,9 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
except CodeMessageException as e:
|
||||
if e.code == 404:
|
||||
return False
|
||||
logger.warning("query_user to %s received %s", service.url, e.code)
|
||||
logger.warning("query_user to %s received %s", uri, e.code)
|
||||
except Exception as ex:
|
||||
logger.warning("query_user to %s threw exception %s", service.url, ex)
|
||||
logger.warning("query_user to %s threw exception %s", uri, ex)
|
||||
return False
|
||||
|
||||
async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
|
||||
@@ -202,23 +152,21 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
|
||||
try:
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/rooms/{urllib.parse.quote(alias)}",
|
||||
self.get_json,
|
||||
response = await self.get_json(
|
||||
uri,
|
||||
{"access_token": service.hs_token},
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
if response is not None: # just an empty json object
|
||||
return True
|
||||
except CodeMessageException as e:
|
||||
logger.warning("query_alias to %s received %s", service.url, e.code)
|
||||
logger.warning("query_alias to %s received %s", uri, e.code)
|
||||
if e.code == 404:
|
||||
return False
|
||||
except Exception as ex:
|
||||
logger.warning("query_alias to %s threw exception %s", service.url, ex)
|
||||
logger.warning("query_alias to %s threw exception %s", uri, ex)
|
||||
return False
|
||||
|
||||
async def query_3pe(
|
||||
@@ -240,24 +188,25 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
uri = "%s%s/thirdparty/%s/%s" % (
|
||||
service.url,
|
||||
APP_SERVICE_PREFIX,
|
||||
kind,
|
||||
urllib.parse.quote(protocol),
|
||||
)
|
||||
try:
|
||||
args: Mapping[Any, Any] = {
|
||||
**fields,
|
||||
b"access_token": service.hs_token,
|
||||
}
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
|
||||
f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
|
||||
self.get_json,
|
||||
response = await self.get_json(
|
||||
uri,
|
||||
args=args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
if not isinstance(response, list):
|
||||
logger.warning(
|
||||
"query_3pe to %s returned an invalid response %r",
|
||||
service.url,
|
||||
response,
|
||||
"query_3pe to %s returned an invalid response %r", uri, response
|
||||
)
|
||||
return []
|
||||
|
||||
@@ -267,12 +216,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
ret.append(r)
|
||||
else:
|
||||
logger.warning(
|
||||
"query_3pe to %s returned an invalid result %r", service.url, r
|
||||
"query_3pe to %s returned an invalid result %r", uri, r
|
||||
)
|
||||
|
||||
return ret
|
||||
except Exception as ex:
|
||||
logger.warning("query_3pe to %s threw exception %s", service.url, ex)
|
||||
logger.warning("query_3pe to %s threw exception %s", uri, ex)
|
||||
return []
|
||||
|
||||
async def get_3pe_protocol(
|
||||
@@ -284,20 +233,21 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
async def _get() -> Optional[JsonDict]:
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
uri = "%s%s/thirdparty/protocol/%s" % (
|
||||
service.url,
|
||||
APP_SERVICE_PREFIX,
|
||||
urllib.parse.quote(protocol),
|
||||
)
|
||||
try:
|
||||
info = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
|
||||
f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
|
||||
self.get_json,
|
||||
info = await self.get_json(
|
||||
uri,
|
||||
{"access_token": service.hs_token},
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
|
||||
if not _is_valid_3pe_metadata(info):
|
||||
logger.warning(
|
||||
"query_3pe_protocol to %s did not return a valid result",
|
||||
service.url,
|
||||
"query_3pe_protocol to %s did not return a valid result", uri
|
||||
)
|
||||
return None
|
||||
|
||||
@@ -310,27 +260,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
|
||||
return info
|
||||
except Exception as ex:
|
||||
logger.warning(
|
||||
"query_3pe_protocol to %s threw exception %s", service.url, ex
|
||||
)
|
||||
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
|
||||
return None
|
||||
|
||||
key = (service.id, protocol)
|
||||
return await self.protocol_meta_cache.wrap(key, _get)
|
||||
|
||||
async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None:
|
||||
# The caller should check that url is set
|
||||
assert service.url is not None, "ping called without URL being set"
|
||||
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
await self.post_json_get_json(
|
||||
uri=f"{service.url}{APP_SERVICE_UNSTABLE_PREFIX}/fi.mau.msc2659/ping",
|
||||
post_json={"transaction_id": txn_id},
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
|
||||
async def push_bulk(
|
||||
self,
|
||||
service: "ApplicationService",
|
||||
@@ -370,6 +305,8 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
)
|
||||
txn_id = 0
|
||||
|
||||
uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))
|
||||
|
||||
# Never send ephemeral events to appservices that do not support it
|
||||
body: JsonDict = {"events": serialized_events}
|
||||
if service.supports_ephemeral:
|
||||
@@ -401,11 +338,8 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
}
|
||||
|
||||
try:
|
||||
await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/transactions/{urllib.parse.quote(str(txn_id))}",
|
||||
self.put_json,
|
||||
await self.put_json(
|
||||
uri=uri,
|
||||
json_body=body,
|
||||
args={"access_token": service.hs_token},
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
@@ -413,7 +347,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
"push_bulk to %s succeeded! events=%s",
|
||||
service.url,
|
||||
uri,
|
||||
[event.get("event_id") for event in events],
|
||||
)
|
||||
sent_transactions_counter.labels(service.id).inc()
|
||||
@@ -424,7 +358,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
except CodeMessageException as e:
|
||||
logger.warning(
|
||||
"push_bulk to %s received code=%s msg=%s",
|
||||
service.url,
|
||||
uri,
|
||||
e.code,
|
||||
e.msg,
|
||||
exc_info=logger.isEnabledFor(logging.DEBUG),
|
||||
@@ -432,7 +366,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
except Exception as ex:
|
||||
logger.warning(
|
||||
"push_bulk to %s threw exception(%s) %s args=%s",
|
||||
service.url,
|
||||
uri,
|
||||
type(ex).__name__,
|
||||
ex,
|
||||
ex.args,
|
||||
@@ -441,108 +375,6 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
failed_transactions_counter.labels(service.id).inc()
|
||||
return False
|
||||
|
||||
async def claim_client_keys(
|
||||
self, service: "ApplicationService", query: List[Tuple[str, str, str]]
|
||||
) -> Tuple[Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str]]]:
|
||||
"""Claim one time keys from an application service.
|
||||
|
||||
Note that any error (including a timeout) is treated as the application
|
||||
service having no information.
|
||||
|
||||
Args:
|
||||
service: The application service to query.
|
||||
query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
A map of user ID -> a map device ID -> a map of key ID -> JSON dict.
|
||||
|
||||
A copy of the input which has not been fulfilled because the
|
||||
appservice doesn't support this endpoint or has not returned
|
||||
data for that tuple.
|
||||
"""
|
||||
if service.url is None:
|
||||
return {}, query
|
||||
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
# Create the expected payload shape.
|
||||
body: Dict[str, Dict[str, List[str]]] = {}
|
||||
for user_id, device, algorithm in query:
|
||||
body.setdefault(user_id, {}).setdefault(device, []).append(algorithm)
|
||||
|
||||
uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim"
|
||||
try:
|
||||
response = await self.post_json_get_json(
|
||||
uri,
|
||||
body,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
# The appservice doesn't support this endpoint.
|
||||
if is_unknown_endpoint(e):
|
||||
return {}, query
|
||||
logger.warning("claim_keys to %s received %s", uri, e.code)
|
||||
return {}, query
|
||||
except Exception as ex:
|
||||
logger.warning("claim_keys to %s threw exception %s", uri, ex)
|
||||
return {}, query
|
||||
|
||||
# Check if the appservice fulfilled all of the queried user/device/algorithms
|
||||
# or if some are still missing.
|
||||
#
|
||||
# TODO This places a lot of faith in the response shape being correct.
|
||||
missing = [
|
||||
(user_id, device, algorithm)
|
||||
for user_id, device, algorithm in query
|
||||
if algorithm not in response.get(user_id, {}).get(device, [])
|
||||
]
|
||||
|
||||
return response, missing
|
||||
|
||||
async def query_keys(
|
||||
self, service: "ApplicationService", query: Dict[str, List[str]]
|
||||
) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
|
||||
"""Query the application service for keys.
|
||||
|
||||
Note that any error (including a timeout) is treated as the application
|
||||
service having no information.
|
||||
|
||||
Args:
|
||||
service: The application service to query.
|
||||
query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
|
||||
Returns:
|
||||
A map of device_keys/master_keys/self_signing_keys/user_signing_keys:
|
||||
|
||||
device_keys is a map of user ID -> a map device ID -> device info.
|
||||
"""
|
||||
if service.url is None:
|
||||
return {}
|
||||
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
|
||||
uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3984/keys/query"
|
||||
try:
|
||||
response = await self.post_json_get_json(
|
||||
uri,
|
||||
query,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
# The appservice doesn't support this endpoint.
|
||||
if is_unknown_endpoint(e):
|
||||
return {}
|
||||
logger.warning("query_keys to %s received %s", uri, e.code)
|
||||
return {}
|
||||
except Exception as ex:
|
||||
logger.warning("query_keys to %s threw exception %s", uri, ex)
|
||||
return {}
|
||||
|
||||
return response
|
||||
|
||||
def _serialize(
|
||||
self, service: "ApplicationService", events: Iterable[EventBase]
|
||||
) -> List[JsonDict]:
|
||||
|
||||
@@ -74,16 +74,6 @@ class ExperimentalConfig(Config):
|
||||
"msc3202_transaction_extensions", False
|
||||
)
|
||||
|
||||
# MSC3983: Proxying OTK claim requests to exclusive ASes.
|
||||
self.msc3983_appservice_otk_claims: bool = experimental.get(
|
||||
"msc3983_appservice_otk_claims", False
|
||||
)
|
||||
|
||||
# MSC3984: Proxying key queries to exclusive ASes.
|
||||
self.msc3984_appservice_key_query: bool = experimental.get(
|
||||
"msc3984_appservice_key_query", False
|
||||
)
|
||||
|
||||
# MSC3706 (server-side support for partial state in /send_join responses)
|
||||
# Synapse will always serve partial state responses to requests using the stable
|
||||
# query parameter `omit_members`. If this flag is set, Synapse will also serve
|
||||
@@ -188,6 +178,3 @@ class ExperimentalConfig(Config):
|
||||
|
||||
# MSC3967: Do not require UIA when first uploading cross signing keys
|
||||
self.msc3967_enabled = experimental.get("msc3967_enabled", False)
|
||||
|
||||
# MSC2659: Application service ping endpoint
|
||||
self.msc2659_enabled = experimental.get("msc2659_enabled", False)
|
||||
|
||||
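Flags such as `msc3983_appservice_otk_claims` above are plain booleans read from the `experimental_features` section of the homeserver config, each falling back to a default. A small sketch of that lookup (the inline YAML is illustrative):

```python
# Sketch: experimental flags are a flat dict under "experimental_features".
import yaml

config = yaml.safe_load(
    "experimental_features:\n"
    "  msc3983_appservice_otk_claims: true\n"
)

experimental = config.get("experimental_features") or {}

msc3983_appservice_otk_claims = experimental.get("msc3983_appservice_otk_claims", False)
msc3984_appservice_key_query = experimental.get("msc3984_appservice_key_query", False)

print(msc3983_appservice_otk_claims, msc3984_appservice_key_query)  # True False
```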
@@ -136,7 +136,6 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
|
||||
"type": "array",
|
||||
"items": SsoAttributeRequirement.JSON_SCHEMA,
|
||||
},
|
||||
"enable_registration": {"type": "boolean"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -307,7 +306,6 @@ def _parse_oidc_config_dict(
|
||||
user_mapping_provider_class=user_mapping_provider_class,
|
||||
user_mapping_provider_config=user_mapping_provider_config,
|
||||
attribute_requirements=attribute_requirements,
|
||||
enable_registration=oidc_config.get("enable_registration", True),
|
||||
)
|
||||
|
||||
|
||||
@@ -407,6 +405,3 @@ class OidcProviderConfig:
|
||||
|
||||
# required attributes to require in userinfo to allow login/registration
|
||||
attribute_requirements: List[SsoAttributeRequirement]
|
||||
|
||||
# Whether automatic registrations are enabled in the ODIC flow. Defaults to True
|
||||
enable_registration: bool
|
||||
|
||||
@@ -214,52 +214,17 @@ class HttpListenerConfig:
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class TCPListenerConfig:
|
||||
"""Object describing the configuration of a single TCP listener."""
|
||||
class ListenerConfig:
|
||||
"""Object describing the configuration of a single listener."""
|
||||
|
||||
port: int = attr.ib(validator=attr.validators.instance_of(int))
|
||||
bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List))
|
||||
bind_addresses: List[str]
|
||||
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
tls: bool = False
|
||||
|
||||
# http_options is only populated if type=http
|
||||
http_options: Optional[HttpListenerConfig] = None
|
||||
|
||||
def get_site_tag(self) -> str:
|
||||
"""Retrieves http_options.tag if it exists, otherwise the port number."""
|
||||
if self.http_options and self.http_options.tag is not None:
|
||||
return self.http_options.tag
|
||||
else:
|
||||
return str(self.port)
|
||||
|
||||
def is_tls(self) -> bool:
|
||||
return self.tls
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class UnixListenerConfig:
|
||||
"""Object describing the configuration of a single Unix socket listener."""
|
||||
|
||||
# Note: unix sockets can not be tls encrypted, so HAVE to be behind a tls-handling
|
||||
# reverse proxy
|
||||
path: str = attr.ib()
|
||||
# A default(0o666) for this is set in parse_listener_def() below
|
||||
mode: int
|
||||
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
|
||||
# http_options is only populated if type=http
|
||||
http_options: Optional[HttpListenerConfig] = None
|
||||
|
||||
def get_site_tag(self) -> str:
|
||||
return "unix"
|
||||
|
||||
def is_tls(self) -> bool:
|
||||
"""Unix sockets can't have TLS"""
|
||||
return False
|
||||
|
||||
|
||||
ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class ManholeConfig:
|
||||
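The listener classes above lean on `attrs` for immutability and field validation. A compact, self-contained sketch of those idioms (the `Listener` class and `KNOWN_TYPES` set are simplified stand-ins, not Synapse's real definitions):

```python
# Sketch of the attrs idioms used above: slots=True + frozen=True gives an immutable
# value object, and attr.ib validators reject bad field values at construction time.
import attr

KNOWN_TYPES = {"http", "manhole", "metrics"}


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Listener:
    port: int = attr.ib(validator=attr.validators.instance_of(int))
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_TYPES))
    tls: bool = False


listener = Listener(port=8008, type="http")
print(listener)

try:
    Listener(port="8008", type="http")  # wrong type for port
except TypeError as e:
    print("rejected:", e)

try:
    Listener(port=9000, type="smtp")  # not a known listener type
except ValueError as e:
    print("rejected:", e)
```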
@@ -566,12 +531,12 @@ class ServerConfig(Config):
|
||||
|
||||
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
|
||||
|
||||
# no_tls is not really supported anymore, but let's grandfather it in here.
|
||||
# no_tls is not really supported any more, but let's grandfather it in
|
||||
# here.
|
||||
if config.get("no_tls", False):
|
||||
l2 = []
|
||||
for listener in self.listeners:
|
||||
if isinstance(listener, TCPListenerConfig) and listener.tls:
|
||||
# Use isinstance() as the assertion this *has* a listener.port
|
||||
if listener.tls:
|
||||
logger.info(
|
||||
"Ignoring TLS-enabled listener on port %i due to no_tls",
|
||||
listener.port,
|
||||
@@ -612,7 +577,7 @@ class ServerConfig(Config):
|
||||
)
|
||||
|
||||
self.listeners.append(
|
||||
TCPListenerConfig(
|
||||
ListenerConfig(
|
||||
port=bind_port,
|
||||
bind_addresses=[bind_host],
|
||||
tls=True,
|
||||
@@ -624,7 +589,7 @@ class ServerConfig(Config):
|
||||
unsecure_port = config.get("unsecure_port", bind_port - 400)
|
||||
if unsecure_port:
|
||||
self.listeners.append(
|
||||
TCPListenerConfig(
|
||||
ListenerConfig(
|
||||
port=unsecure_port,
|
||||
bind_addresses=[bind_host],
|
||||
tls=False,
|
||||
@@ -636,7 +601,7 @@ class ServerConfig(Config):
|
||||
manhole = config.get("manhole")
|
||||
if manhole:
|
||||
self.listeners.append(
|
||||
TCPListenerConfig(
|
||||
ListenerConfig(
|
||||
port=manhole,
|
||||
bind_addresses=["127.0.0.1"],
|
||||
type="manhole",
|
||||
@@ -683,7 +648,7 @@ class ServerConfig(Config):
|
||||
logger.warning(METRICS_PORT_WARNING)
|
||||
|
||||
self.listeners.append(
|
||||
TCPListenerConfig(
|
||||
ListenerConfig(
|
||||
port=metrics_port,
|
||||
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
|
||||
type="http",
|
||||
@@ -759,7 +724,7 @@ class ServerConfig(Config):
|
||||
self.delete_stale_devices_after = None
|
||||
|
||||
def has_tls_listener(self) -> bool:
|
||||
return any(listener.is_tls() for listener in self.listeners)
|
||||
return any(listener.tls for listener in self.listeners)
|
||||
|
||||
def generate_config_section(
|
||||
self,
|
||||
@@ -939,25 +904,25 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
|
||||
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
|
||||
|
||||
port = listener.get("port")
|
||||
socket_path = listener.get("path")
|
||||
# Either a port or a path should be declared at a minimum. Using both would be bad.
|
||||
if port is not None and not isinstance(port, int):
|
||||
if type(port) is not int:
|
||||
raise ConfigError("Listener configuration is lacking a valid 'port' option")
|
||||
if socket_path is not None and not isinstance(socket_path, str):
|
||||
raise ConfigError("Listener configuration is lacking a valid 'path' option")
|
||||
if port and socket_path:
|
||||
raise ConfigError(
|
||||
"Can not have both a UNIX socket and an IP/port declared for the same "
|
||||
"resource!"
|
||||
)
|
||||
if port is None and socket_path is None:
|
||||
raise ConfigError(
|
||||
"Must have either a UNIX socket or an IP/port declared for a given "
|
||||
"resource!"
|
||||
)
|
||||
|
||||
tls = listener.get("tls", False)
|
||||
|
||||
bind_addresses = listener.get("bind_addresses", [])
|
||||
bind_address = listener.get("bind_address")
|
||||
# if bind_address was specified, add it to the list of addresses
|
||||
if bind_address:
|
||||
bind_addresses.append(bind_address)
|
||||
|
||||
# if we still have an empty list of addresses, use the default list
|
||||
if not bind_addresses:
|
||||
if listener_type == "metrics":
|
||||
# the metrics listener doesn't support IPv6
|
||||
bind_addresses.append("0.0.0.0")
|
||||
else:
|
||||
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
|
||||
|
||||
http_config = None
|
||||
if listener_type == "http":
|
||||
try:
|
||||
@@ -967,12 +932,8 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
|
||||
except ValueError as e:
|
||||
raise ConfigError("Unknown listener resource") from e
|
||||
|
||||
# For a unix socket, default x_forwarded to True, as this is the only way of
|
||||
# getting a client IP.
|
||||
# Note: a reverse proxy is required anyway, as there is no way of exposing a
|
||||
# unix socket to the internet.
|
||||
http_config = HttpListenerConfig(
|
||||
x_forwarded=listener.get("x_forwarded", (True if socket_path else False)),
|
||||
x_forwarded=listener.get("x_forwarded", False),
|
||||
resources=resources,
|
||||
additional_resources=listener.get("additional_resources", {}),
|
||||
tag=listener.get("tag"),
|
||||
@@ -980,30 +941,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
|
||||
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
|
||||
)
|
||||
|
||||
if socket_path:
|
||||
# TODO: Add in path validation, like if the directory exists and is writable?
|
||||
# Set a default for the permission, in case it's left out
|
||||
socket_mode = listener.get("mode", 0o666)
|
||||
|
||||
return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)
|
||||
|
||||
else:
|
||||
assert port is not None
|
||||
bind_addresses = listener.get("bind_addresses", [])
|
||||
bind_address = listener.get("bind_address")
|
||||
# if bind_address was specified, add it to the list of addresses
|
||||
if bind_address:
|
||||
bind_addresses.append(bind_address)
|
||||
|
||||
# if we still have an empty list of addresses, use the default list
|
||||
if not bind_addresses:
|
||||
if listener_type == "metrics":
|
||||
# the metrics listener doesn't support IPv6
|
||||
bind_addresses.append("0.0.0.0")
|
||||
else:
|
||||
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
|
||||
|
||||
return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)
|
||||
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
|
||||
|
||||
|
||||
_MANHOLE_SETTINGS_SCHEMA = {
|
||||
|
||||
@@ -18,20 +18,16 @@ import logging
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
import attr
|
||||
from pydantic import BaseModel, StrictBool, StrictInt, StrictStr, parse_obj_as
|
||||
|
||||
from synapse.config._base import (
|
||||
from synapse.types import JsonDict
|
||||
|
||||
from ._base import (
|
||||
Config,
|
||||
ConfigError,
|
||||
RoutableShardedWorkerHandlingConfig,
|
||||
ShardedWorkerHandlingConfig,
|
||||
)
|
||||
from synapse.config.server import (
|
||||
DIRECT_TCP_ERROR,
|
||||
TCPListenerConfig,
|
||||
parse_listener_def,
|
||||
)
|
||||
from synapse.types import JsonDict
|
||||
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
|
||||
|
||||
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
|
||||
The '%s' configuration option is deprecated and will be removed in a future
|
||||
@@ -51,23 +47,13 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
|
||||
return obj
|
||||
|
||||
|
||||
class UnixSocketInstanceLocationConfig(BaseModel):
|
||||
"""The path to talk to an instance via HTTP replication over Unix socket."""
|
||||
|
||||
socket_path: StrictStr
|
||||
|
||||
|
||||
class TcpInstanceLocationConfig(BaseModel):
|
||||
@attr.s(auto_attribs=True)
|
||||
class InstanceLocationConfig:
|
||||
"""The host and port to talk to an instance via HTTP replication."""
|
||||
|
||||
host: StrictStr
|
||||
port: StrictInt
|
||||
tls: StrictBool = False
|
||||
|
||||
|
||||
InstanceLocationConfig = Union[
|
||||
UnixSocketInstanceLocationConfig, TcpInstanceLocationConfig
|
||||
]
|
||||
host: str
|
||||
port: int
|
||||
tls: bool = False
|
||||
|
||||
|
||||
@attr.s
|
||||
@@ -175,7 +161,7 @@ class WorkerConfig(Config):
|
||||
manhole = config.get("worker_manhole")
|
||||
if manhole:
|
||||
self.worker_listeners.append(
|
||||
TCPListenerConfig(
|
||||
ListenerConfig(
|
||||
port=manhole,
|
||||
bind_addresses=["127.0.0.1"],
|
||||
type="manhole",
|
||||
@@ -193,10 +179,11 @@ class WorkerConfig(Config):
|
||||
federation_sender_instances
|
||||
)
|
||||
|
||||
# A map from instance name to connection details for their HTTP replication endpoint.
|
||||
self.instance_map = parse_obj_as(
|
||||
Dict[str, InstanceLocationConfig], config.get("instance_map") or {}
|
||||
)
|
||||
# A map from instance name to host/port of their HTTP replication endpoint.
|
||||
instance_map = config.get("instance_map") or {}
|
||||
self.instance_map = {
|
||||
name: InstanceLocationConfig(**c) for name, c in instance_map.items()
|
||||
}
|
||||
|
||||
# Map from type of streams to source, c.f. WriterLocations.
|
||||
writers = config.get("stream_writers") or {}
|
||||
|
||||
@@ -51,7 +51,7 @@ def check_event_content_hash(
|
||||
# some malformed events lack a 'hashes'. Protect against it being missing
|
||||
# or a weird type by basically treating it the same as an unhashed event.
|
||||
hashes = event.get("hashes")
|
||||
# nb it might be a immutabledict or a dict
|
||||
# nb it might be a frozendict or a dict
|
||||
if not isinstance(hashes, collections.abc.Mapping):
|
||||
raise SynapseError(
|
||||
400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
|
||||
|
||||
@@ -462,7 +462,7 @@ class FrozenEvent(EventBase):
|
||||
# Signatures is a dict of dicts, and this is faster than doing a
|
||||
# copy.deepcopy
|
||||
signatures = {
|
||||
name: dict(sigs.items())
|
||||
name: {sig_id: sig for sig_id, sig in sigs.items()}
|
||||
for name, sigs in event_dict.pop("signatures", {}).items()
|
||||
}
|
||||
|
||||
@@ -510,7 +510,7 @@ class FrozenEventV2(EventBase):
|
||||
# Signatures is a dict of dicts, and this is faster than doing a
|
||||
# copy.deepcopy
|
||||
signatures = {
|
||||
name: dict(sigs.items())
|
||||
name: {sig_id: sig for sig_id, sig in sigs.items()}
|
||||
for name, sigs in event_dict.pop("signatures", {}).items()
|
||||
}
|
||||
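Both hunks above build a two-level shallow copy of the `signatures` mapping; the inner values are immutable strings, so sharing them is safe and much cheaper than `copy.deepcopy`. A short illustration with fabricated event data:

```python
# Sketch of the shallow two-level copy used for event signatures above. The event
# dict and signature blobs are fabricated for illustration.
import copy
import timeit

event_dict = {
    "signatures": {
        "example.org": {"ed25519:a_hDuM": "KxwGjPSDEevvr/RC..."},
        "other.example": {"ed25519:key1": "dGhpcyBpcyBub3Qg..."},
    }
}

# Copy each per-server mapping into a fresh dict; the leaf strings are shared,
# which is fine because str is immutable.
signatures = {
    name: dict(sigs.items())
    for name, sigs in event_dict["signatures"].items()
}

assert signatures == event_dict["signatures"]
assert signatures["example.org"] is not event_dict["signatures"]["example.org"]

# The comprehension-of-dict approach avoids copy.deepcopy's per-object overhead.
fast = timeit.timeit(lambda: {n: dict(s.items()) for n, s in event_dict["signatures"].items()}, number=10000)
slow = timeit.timeit(lambda: copy.deepcopy(event_dict["signatures"]), number=10000)
print(f"shallow: {fast:.3f}s  deepcopy: {slow:.3f}s")
```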
|
||||
|
||||
@@ -12,93 +12,19 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
from typing_extensions import ParamSpec
|
||||
from typing import TYPE_CHECKING, Dict, Iterable, Set, Union
|
||||
|
||||
from twisted.internet.defer import CancelledError
|
||||
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
|
||||
from synapse.util.async_helpers import delay_cancellation
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
GET_USERS_FOR_STATES_CALLBACK = Callable[
|
||||
[Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
|
||||
]
|
||||
# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
|
||||
GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
|
||||
|
||||
def load_legacy_presence_router(hs: "HomeServer") -> None:
|
||||
"""Wrapper that loads a presence router module configured using the old
|
||||
configuration, and registers the hooks they implement.
|
||||
"""
|
||||
|
||||
if hs.config.server.presence_router_module_class is None:
|
||||
return
|
||||
|
||||
module = hs.config.server.presence_router_module_class
|
||||
config = hs.config.server.presence_router_config
|
||||
api = hs.get_module_api()
|
||||
|
||||
presence_router = module(config=config, module_api=api)
|
||||
|
||||
# The known hooks. If a module implements a method which name appears in this set,
|
||||
# we'll want to register it.
|
||||
presence_router_methods = {
|
||||
"get_users_for_states",
|
||||
"get_interested_users",
|
||||
}
|
||||
|
||||
# All methods that the module provides should be async, but this wasn't enforced
|
||||
# in the old module system, so we wrap them if needed
|
||||
def async_wrapper(
|
||||
f: Optional[Callable[P, R]]
|
||||
) -> Optional[Callable[P, Awaitable[R]]]:
|
||||
# f might be None if the callback isn't implemented by the module. In this
|
||||
# case we don't want to register a callback at all so we return None.
|
||||
if f is None:
|
||||
return None
|
||||
|
||||
def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
|
||||
# Assertion required because mypy can't prove we won't change `f`
|
||||
# back to `None`. See
|
||||
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
|
||||
assert f is not None
|
||||
|
||||
return maybe_awaitable(f(*args, **kwargs))
|
||||
|
||||
return run
|
||||
|
||||
# Register the hooks through the module API.
|
||||
hooks: Dict[str, Optional[Callable[..., Any]]] = {
|
||||
hook: async_wrapper(getattr(presence_router, hook, None))
|
||||
for hook in presence_router_methods
|
||||
}
|
||||
|
||||
api.register_presence_router_callbacks(**hooks)
|
||||
|
||||
|
||||
class PresenceRouter:
|
||||
"""
|
||||
A module that the homeserver will call upon to help route user presence updates to
|
||||
@@ -108,30 +34,7 @@ class PresenceRouter:
|
||||
ALL_USERS = "ALL"
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
# Initially there are no callbacks
|
||||
self._get_users_for_states_callbacks: List[GET_USERS_FOR_STATES_CALLBACK] = []
|
||||
self._get_interested_users_callbacks: List[GET_INTERESTED_USERS_CALLBACK] = []
|
||||
|
||||
def register_presence_router_callbacks(
|
||||
self,
|
||||
get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
|
||||
get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
|
||||
) -> None:
|
||||
# PresenceRouter modules are required to implement both of these methods
|
||||
# or neither of them as they are assumed to act in a complementary manner
|
||||
paired_methods = [get_users_for_states, get_interested_users]
|
||||
if paired_methods.count(None) == 1:
|
||||
raise RuntimeError(
|
||||
"PresenceRouter modules must register neither or both of the paired callbacks: "
|
||||
"[get_users_for_states, get_interested_users]"
|
||||
)
|
||||
|
||||
# Append the methods provided to the lists of callbacks
|
||||
if get_users_for_states is not None:
|
||||
self._get_users_for_states_callbacks.append(get_users_for_states)
|
||||
|
||||
if get_interested_users is not None:
|
||||
self._get_interested_users_callbacks.append(get_interested_users)
|
||||
self._module_api_callbacks = hs.get_module_api_callbacks().presence_router
|
||||
|
||||
async def get_users_for_states(
|
||||
self,
|
||||
@@ -150,13 +53,13 @@ class PresenceRouter:
|
||||
"""
|
||||
|
||||
# Bail out early if we don't have any callbacks to run.
|
||||
if len(self._get_users_for_states_callbacks) == 0:
|
||||
if len(self._module_api_callbacks.get_users_for_states_callbacks) == 0:
|
||||
# Don't include any extra destinations for presence updates
|
||||
return {}
|
||||
|
||||
users_for_states: Dict[str, Set[UserPresenceState]] = {}
|
||||
# run all the callbacks for get_users_for_states and combine the results
|
||||
for callback in self._get_users_for_states_callbacks:
|
||||
for callback in self._module_api_callbacks.get_users_for_states_callbacks:
|
||||
try:
|
||||
# Note: result is an object here, because we don't trust modules to
|
||||
# return the types they're supposed to.
|
||||
@@ -206,13 +109,13 @@ class PresenceRouter:
|
||||
"""
|
||||
|
||||
# Bail out early if we don't have any callbacks to run.
|
||||
if len(self._get_interested_users_callbacks) == 0:
|
||||
if len(self._module_api_callbacks.get_interested_users_callbacks) == 0:
|
||||
# Don't report any additional interested users
|
||||
return set()
|
||||
|
||||
interested_users = set()
|
||||
# run all the callbacks for get_interested_users and combine the results
|
||||
for callback in self._get_interested_users_callbacks:
|
||||
for callback in self._module_api_callbacks.get_interested_users_callbacks:
|
||||
try:
|
||||
result = await delay_cancellation(callback(user_id))
|
||||
except CancelledError:
|
||||
|
||||
@@ -15,7 +15,7 @@ from abc import ABC, abstractmethod
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple
|
||||
|
||||
import attr
|
||||
from immutabledict import immutabledict
|
||||
from frozendict import frozendict
|
||||
|
||||
from synapse.appservice import ApplicationService
|
||||
from synapse.events import EventBase
|
||||
@@ -489,4 +489,4 @@ def _decode_state_dict(
|
||||
if input is None:
|
||||
return None
|
||||
|
||||
return immutabledict({(etype, state_key): v for etype, state_key, v in input})
|
||||
return frozendict({(etype, state_key): v for etype, state_key, v in input})
|
||||
|
||||
@@ -13,19 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Collection,
|
||||
List,
|
||||
Optional,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Collection, Optional, Tuple, Union
|
||||
|
||||
# `Literal` appears with Python 3.8.
|
||||
from typing_extensions import Literal
|
||||
@@ -37,7 +26,7 @@ from synapse.media._base import FileInfo
|
||||
from synapse.media.media_storage import ReadableFileWrapper
|
||||
from synapse.spam_checker_api import RegistrationBehaviour
|
||||
from synapse.types import JsonDict, RoomAlias, UserProfile
|
||||
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
|
||||
from synapse.util.async_helpers import delay_cancellation
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -46,338 +35,13 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
|
||||
["synapse.events.EventBase"],
|
||||
Awaitable[
|
||||
Union[
|
||||
str,
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
|
||||
["synapse.events.EventBase"],
|
||||
Awaitable[Union[bool, str]],
|
||||
]
|
||||
USER_MAY_JOIN_ROOM_CALLBACK = Callable[
|
||||
[str, str, bool],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
USER_MAY_INVITE_CALLBACK = Callable[
|
||||
[str, str, str],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
|
||||
[str, str, str, str],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
USER_MAY_CREATE_ROOM_CALLBACK = Callable[
|
||||
[str],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
|
||||
[str, RoomAlias],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
|
||||
[str, str],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]]
|
||||
LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
|
||||
[
|
||||
Optional[dict],
|
||||
Optional[str],
|
||||
Collection[Tuple[str, str]],
|
||||
],
|
||||
Awaitable[RegistrationBehaviour],
|
||||
]
|
||||
CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
|
||||
[
|
||||
Optional[dict],
|
||||
Optional[str],
|
||||
Collection[Tuple[str, str]],
|
||||
Optional[str],
|
||||
],
|
||||
Awaitable[RegistrationBehaviour],
|
||||
]
|
||||
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
|
||||
[ReadableFileWrapper, FileInfo],
|
||||
Awaitable[
|
||||
Union[
|
||||
Literal["NOT_SPAM"],
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
],
|
||||
]
|
||||
|
||||
|
||||
def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
|
||||
"""Wrapper that loads spam checkers configured using the old configuration, and
|
||||
registers the spam checker hooks they implement.
|
||||
"""
|
||||
spam_checkers: List[Any] = []
|
||||
api = hs.get_module_api()
|
||||
for module, config in hs.config.spamchecker.spam_checkers:
|
||||
# Older spam checkers don't accept the `api` argument, so we
|
||||
# try and detect support.
|
||||
spam_args = inspect.getfullargspec(module)
|
||||
if "api" in spam_args.args:
|
||||
spam_checkers.append(module(config=config, api=api))
|
||||
else:
|
||||
spam_checkers.append(module(config=config))
|
||||
|
||||
# The known spam checker hooks. If a spam checker module implements a method
|
||||
# which name appears in this set, we'll want to register it.
|
||||
spam_checker_methods = {
|
||||
"check_event_for_spam",
|
||||
"user_may_invite",
|
||||
"user_may_create_room",
|
||||
"user_may_create_room_alias",
|
||||
"user_may_publish_room",
|
||||
"check_username_for_spam",
|
||||
"check_registration_for_spam",
|
||||
"check_media_file_for_spam",
|
||||
}
|
||||
|
||||
for spam_checker in spam_checkers:
|
||||
# Methods on legacy spam checkers might not be async, so we wrap them around a
|
||||
# wrapper that will call maybe_awaitable on the result.
|
||||
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
|
||||
# f might be None if the callback isn't implemented by the module. In this
|
||||
# case we don't want to register a callback at all so we return None.
|
||||
if f is None:
|
||||
return None
|
||||
|
||||
wrapped_func = f
|
||||
|
||||
if f.__name__ == "check_registration_for_spam":
|
||||
checker_args = inspect.signature(f)
|
||||
if len(checker_args.parameters) == 3:
|
||||
# Backwards compatibility; some modules might implement a hook that
|
||||
# doesn't expect a 4th argument. In this case, wrap it in a function
|
||||
# that gives it only 3 arguments and drops the auth_provider_id on
|
||||
# the floor.
|
||||
def wrapper(
|
||||
email_threepid: Optional[dict],
|
||||
username: Optional[str],
|
||||
request_info: Collection[Tuple[str, str]],
|
||||
auth_provider_id: Optional[str],
|
||||
) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
|
||||
# Assertion required because mypy can't prove we won't
|
||||
# change `f` back to `None`. See
|
||||
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
|
||||
assert f is not None
|
||||
|
||||
return f(
|
||||
email_threepid,
|
||||
username,
|
||||
request_info,
|
||||
)
|
||||
|
||||
wrapped_func = wrapper
|
||||
elif len(checker_args.parameters) != 4:
|
||||
raise RuntimeError(
|
||||
"Bad signature for callback check_registration_for_spam",
|
||||
)
|
||||
|
||||
def run(*args: Any, **kwargs: Any) -> Awaitable:
|
||||
# Assertion required because mypy can't prove we won't change `f`
|
||||
# back to `None`. See
|
||||
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
|
||||
assert wrapped_func is not None
|
||||
|
||||
return maybe_awaitable(wrapped_func(*args, **kwargs))
|
||||
|
||||
return run
|
||||
|
||||
# Register the hooks through the module API.
|
||||
hooks = {
|
||||
hook: async_wrapper(getattr(spam_checker, hook, None))
|
||||
for hook in spam_checker_methods
|
||||
}
|
||||
|
||||
api.register_spam_checker_callbacks(**hooks)
|
||||
|
||||
|
||||
class SpamChecker:
|
||||
NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM"
|
||||
|
||||
def __init__(self, hs: "synapse.server.HomeServer") -> None:
|
||||
self.hs = hs
|
||||
def __init__(self, hs: "synapse.server.HomeServer"):
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
|
||||
self._should_drop_federated_event_callbacks: List[
|
||||
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
|
||||
] = []
|
||||
self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
|
||||
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
|
||||
self._user_may_send_3pid_invite_callbacks: List[
|
||||
USER_MAY_SEND_3PID_INVITE_CALLBACK
|
||||
] = []
|
||||
self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
|
||||
self._user_may_create_room_alias_callbacks: List[
|
||||
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
|
||||
] = []
|
||||
self._user_may_publish_room_callbacks: List[USER_MAY_PUBLISH_ROOM_CALLBACK] = []
|
||||
self._check_username_for_spam_callbacks: List[
|
||||
CHECK_USERNAME_FOR_SPAM_CALLBACK
|
||||
] = []
|
||||
self._check_registration_for_spam_callbacks: List[
|
||||
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
|
||||
] = []
|
||||
self._check_media_file_for_spam_callbacks: List[
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK
] = []

def register_callbacks(
self,
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
should_drop_federated_event: Optional[
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
] = None,
user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
user_may_create_room_alias: Optional[
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
] = None,
user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None,
check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None,
check_registration_for_spam: Optional[
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
] = None,
check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
) -> None:
"""Register callbacks from module for each hook."""
if check_event_for_spam is not None:
self._check_event_for_spam_callbacks.append(check_event_for_spam)

if should_drop_federated_event is not None:
self._should_drop_federated_event_callbacks.append(
should_drop_federated_event
)

if user_may_join_room is not None:
self._user_may_join_room_callbacks.append(user_may_join_room)

if user_may_invite is not None:
self._user_may_invite_callbacks.append(user_may_invite)

if user_may_send_3pid_invite is not None:
self._user_may_send_3pid_invite_callbacks.append(
user_may_send_3pid_invite,
)

if user_may_create_room is not None:
self._user_may_create_room_callbacks.append(user_may_create_room)

if user_may_create_room_alias is not None:
self._user_may_create_room_alias_callbacks.append(
user_may_create_room_alias,
)

if user_may_publish_room is not None:
self._user_may_publish_room_callbacks.append(user_may_publish_room)

if check_username_for_spam is not None:
self._check_username_for_spam_callbacks.append(check_username_for_spam)

if check_registration_for_spam is not None:
self._check_registration_for_spam_callbacks.append(
check_registration_for_spam,
)

if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
self._module_api_callbacks = hs.get_module_api_callbacks().spam_checker

@trace
async def check_event_for_spam(
@@ -401,7 +65,7 @@ class SpamChecker:
string should be used as the client-facing error message. This usage is
generally discouraged as it doesn't support internationalization.
"""
for callback in self._check_event_for_spam_callbacks:
for callback in self._module_api_callbacks.check_event_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -456,7 +120,9 @@ class SpamChecker:
Returns:
True if the event should be silently dropped
"""
for callback in self._should_drop_federated_event_callbacks:
for (
callback
) in self._module_api_callbacks.should_drop_federated_event_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -480,7 +146,7 @@ class SpamChecker:
Returns:
NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
"""
for callback in self._user_may_join_room_callbacks:
for callback in self._module_api_callbacks.user_may_join_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -521,7 +187,7 @@ class SpamChecker:
Returns:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_invite_callbacks:
for callback in self._module_api_callbacks.user_may_invite_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -568,7 +234,7 @@ class SpamChecker:
Returns:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_send_3pid_invite_callbacks:
for callback in self._module_api_callbacks.user_may_send_3pid_invite_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -605,7 +271,7 @@ class SpamChecker:
Args:
userid: The ID of the user attempting to create a room
"""
for callback in self._user_may_create_room_callbacks:
for callback in self._module_api_callbacks.user_may_create_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -641,7 +307,7 @@ class SpamChecker:
room_alias: The alias to be created

"""
for callback in self._user_may_create_room_alias_callbacks:
for callback in self._module_api_callbacks.user_may_create_room_alias_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -676,7 +342,7 @@ class SpamChecker:
userid: The user ID attempting to publish the room
room_id: The ID of the room that would be published
"""
for callback in self._user_may_publish_room_callbacks:
for callback in self._module_api_callbacks.user_may_publish_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -717,7 +383,7 @@ class SpamChecker:
Returns:
True if the user is spammy.
"""
for callback in self._check_username_for_spam_callbacks:
for callback in self._module_api_callbacks.check_username_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -751,7 +417,9 @@ class SpamChecker:
Enum for how the request should be handled
"""

for callback in self._check_registration_for_spam_callbacks:
for (
callback
) in self._module_api_callbacks.check_registration_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -794,7 +462,7 @@ class SpamChecker:
file_info: Metadata about the file.
"""

for callback in self._check_media_file_for_spam_callbacks:
for callback in self._module_api_callbacks.check_media_file_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):

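The hunks above move the spam-checker callback lists onto `hs.get_module_api_callbacks().spam_checker`, but the surface a module author sees stays the same. As an illustrative sketch (not part of this comparison), a module might register one of these hooks via the stable `ModuleApi.register_spam_checker_callbacks` API roughly like this:

```python
# Hypothetical module showing the check_event_for_spam hook shape used above.
from typing import Union

import synapse.module_api
from synapse.module_api import ModuleApi


class ExampleSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Register only the hooks this module implements; unset hooks stay None.
        api.register_spam_checker_callbacks(check_event_for_spam=self.check_event_for_spam)

    async def check_event_for_spam(
        self, event: "synapse.events.EventBase"
    ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]:
        # Toy rule: reject events whose body contains a marker string.
        if "forbidden-word" in event.content.get("body", ""):
            return synapse.module_api.errors.Codes.FORBIDDEN
        return synapse.module_api.NOT_SPAM
```
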
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from twisted.internet.defer import CancelledError

@@ -21,7 +21,7 @@ from synapse.events import EventBase
from synapse.events.snapshot import UnpersistedEventContextBase
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import delay_cancellation

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -29,117 +29,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


CHECK_EVENT_ALLOWED_CALLBACK = Callable[
[EventBase, StateMap[EventBase]], Awaitable[Tuple[bool, Optional[dict]]]
]
ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable]
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[
[str, str, StateMap[EventBase]], Awaitable[bool]
]
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
[str, StateMap[EventBase], str], Awaitable[bool]
]
ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable]
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]


def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
"""Wrapper that loads a third party event rules module configured using the old
configuration, and registers the hooks they implement.
"""
if hs.config.thirdpartyrules.third_party_event_rules is None:
return

module, config = hs.config.thirdpartyrules.third_party_event_rules

api = hs.get_module_api()
third_party_rules = module(config=config, module_api=api)

# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
third_party_event_rules_methods = {
"check_event_allowed",
"on_create_room",
"check_threepid_can_be_invited",
"check_visibility_can_be_modified",
}

def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None

# We return a separate wrapper for these methods because, in order to wrap them
# correctly, we need to await its result. Therefore it doesn't make a lot of
# sense to make it go through the run() wrapper.
if f.__name__ == "check_event_allowed":
# We need to wrap check_event_allowed because its old form would return either
# a boolean or a dict, but now we want to return the dict separately from the
# boolean.
async def wrap_check_event_allowed(
event: EventBase,
state_events: StateMap[EventBase],
) -> Tuple[bool, Optional[dict]]:
# Assertion required because mypy can't prove we won't change
# `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None

res = await f(event, state_events)
if isinstance(res, dict):
return True, res
else:
return res, None

return wrap_check_event_allowed

if f.__name__ == "on_create_room":
# We need to wrap on_create_room because its old form would return a boolean
# if the room creation is denied, but now we just want it to raise an
# exception.
async def wrap_on_create_room(
requester: Requester, config: dict, is_requester_admin: bool
) -> None:
# Assertion required because mypy can't prove we won't change
# `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None

res = await f(requester, config, is_requester_admin)
if res is False:
raise SynapseError(
403,
"Room creation forbidden with these parameters",
)

return wrap_on_create_room

def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None

return maybe_awaitable(f(*args, **kwargs))

return run

# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(third_party_rules, hook, None))
for hook in third_party_event_rules_methods
}

api.register_third_party_rules_callbacks(**hooks)

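For context on what `register_third_party_rules_callbacks` looks like from a module author's point of view, here is an illustrative sketch (not taken from this diff) that registers an `on_create_room` hook directly through the modern module API, rather than via the legacy wrapper above:

```python
# Hypothetical module using the non-legacy registration path sketched above.
from synapse.module_api import ModuleApi
from synapse.module_api.errors import SynapseError
from synapse.types import Requester


class ExampleRoomRules:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_third_party_rules_callbacks(on_create_room=self.on_create_room)

    async def on_create_room(
        self, requester: Requester, room_config: dict, is_requester_admin: bool
    ) -> None:
        # Toy rule: only admins may create rooms that pre-seed initial_state.
        # Raising is the modern way to veto; legacy modules returned False instead.
        if not is_requester_admin and room_config.get("initial_state"):
            raise SynapseError(403, "Room creation forbidden with these parameters")
```
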
class ThirdPartyEventRules:
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.
@@ -153,104 +42,9 @@ class ThirdPartyEventRules:

self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()

self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = []
self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = []
self._check_threepid_can_be_invited_callbacks: List[
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
] = []
self._check_visibility_can_be_modified_callbacks: List[
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
] = []
self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
self._check_can_shutdown_room_callbacks: List[
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
] = []
self._check_can_deactivate_user_callbacks: List[
CHECK_CAN_DEACTIVATE_USER_CALLBACK
] = []
self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
self._on_user_deactivation_status_changed_callbacks: List[
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = []
self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = []
self._on_add_user_third_party_identifier_callbacks: List[
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = []
self._on_remove_user_third_party_identifier_callbacks: List[
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = []

def register_third_party_rules_callbacks(
self,
check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None,
on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None,
check_threepid_can_be_invited: Optional[
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
] = None,
check_visibility_can_be_modified: Optional[
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
] = None,
on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
on_user_deactivation_status_changed: Optional[
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = None,
on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
on_add_user_third_party_identifier: Optional[
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
on_remove_user_third_party_identifier: Optional[
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
] = None,
) -> None:
"""Register callbacks from modules for each hook."""
if check_event_allowed is not None:
self._check_event_allowed_callbacks.append(check_event_allowed)

if on_create_room is not None:
self._on_create_room_callbacks.append(on_create_room)

if check_threepid_can_be_invited is not None:
self._check_threepid_can_be_invited_callbacks.append(
check_threepid_can_be_invited,
)

if check_visibility_can_be_modified is not None:
self._check_visibility_can_be_modified_callbacks.append(
check_visibility_can_be_modified,
)

if on_new_event is not None:
self._on_new_event_callbacks.append(on_new_event)

if check_can_shutdown_room is not None:
self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room)

if check_can_deactivate_user is not None:
self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user)
if on_profile_update is not None:
self._on_profile_update_callbacks.append(on_profile_update)

if on_user_deactivation_status_changed is not None:
self._on_user_deactivation_status_changed_callbacks.append(
on_user_deactivation_status_changed,
)

if on_threepid_bind is not None:
self._on_threepid_bind_callbacks.append(on_threepid_bind)

if on_add_user_third_party_identifier is not None:
self._on_add_user_third_party_identifier_callbacks.append(
on_add_user_third_party_identifier
)

if on_remove_user_third_party_identifier is not None:
self._on_remove_user_third_party_identifier_callbacks.append(
on_remove_user_third_party_identifier
)
self._module_api_callbacks = (
hs.get_module_api_callbacks().third_party_event_rules
)

async def check_event_allowed(
self,
@@ -274,7 +68,7 @@ class ThirdPartyEventRules:
The result from the ThirdPartyRules module, as above.
"""
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._check_event_allowed_callbacks) == 0:
if len(self._module_api_callbacks.check_event_allowed_callbacks) == 0:
return True, None

prev_state_ids = await context.get_prev_state_ids()
@@ -288,7 +82,7 @@ class ThirdPartyEventRules:
# the hashes and signatures.
event.freeze()

for callback in self._check_event_allowed_callbacks:
for callback in self._module_api_callbacks.check_event_allowed_callbacks:
try:
res, replacement_data = await delay_cancellation(
callback(event, state_events)
@@ -329,7 +123,7 @@ class ThirdPartyEventRules:
config: The creation config from the client.
is_requester_admin: If the requester is an admin
"""
for callback in self._on_create_room_callbacks:
for callback in self._module_api_callbacks.on_create_room_callbacks:
try:
await callback(requester, config, is_requester_admin)
except Exception as e:
@@ -357,12 +151,14 @@ class ThirdPartyEventRules:
True if the 3PID can be invited, False if not.
"""
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._check_threepid_can_be_invited_callbacks) == 0:
if len(self._module_api_callbacks.check_threepid_can_be_invited_callbacks) == 0:
return True

state_events = await self._get_state_map_for_room(room_id)

for callback in self._check_threepid_can_be_invited_callbacks:
for (
callback
) in self._module_api_callbacks.check_threepid_can_be_invited_callbacks:
try:
threepid_can_be_invited = await delay_cancellation(
callback(medium, address, state_events)
@@ -390,12 +186,17 @@ class ThirdPartyEventRules:
True if the room's visibility can be modified, False if not.
"""
# Bail out early without hitting the store if we don't have any callback
if len(self._check_visibility_can_be_modified_callbacks) == 0:
if (
len(self._module_api_callbacks.check_visibility_can_be_modified_callbacks)
== 0
):
return True

state_events = await self._get_state_map_for_room(room_id)

for callback in self._check_visibility_can_be_modified_callbacks:
for (
callback
) in self._module_api_callbacks.check_visibility_can_be_modified_callbacks:
try:
visibility_can_be_modified = await delay_cancellation(
callback(room_id, state_events, new_visibility)
@@ -417,13 +218,13 @@ class ThirdPartyEventRules:
event_id: The ID of the event.
"""
# Bail out early without hitting the store if we don't have any callbacks
if len(self._on_new_event_callbacks) == 0:
if len(self._module_api_callbacks.on_new_event_callbacks) == 0:
return

event = await self.store.get_event(event_id)
state_events = await self._get_state_map_for_room(event.room_id)

for callback in self._on_new_event_callbacks:
for callback in self._module_api_callbacks.on_new_event_callbacks:
try:
await callback(event, state_events)
except Exception as e:
@@ -439,7 +240,7 @@ class ThirdPartyEventRules:
requester: The ID of the user requesting the shutdown.
room_id: The ID of the room.
"""
for callback in self._check_can_shutdown_room_callbacks:
for callback in self._module_api_callbacks.check_can_shutdown_room_callbacks:
try:
can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
if can_shutdown_room is False:
@@ -464,7 +265,7 @@ class ThirdPartyEventRules:
requester
user_id: The ID of the room.
"""
for callback in self._check_can_deactivate_user_callbacks:
for callback in self._module_api_callbacks.check_can_deactivate_user_callbacks:
try:
can_deactivate_user = await delay_cancellation(
callback(user_id, by_admin)
@@ -502,7 +303,7 @@ class ThirdPartyEventRules:
by_admin: Whether the profile update was performed by a server admin.
deactivation: Whether this change was made while deactivating the user.
"""
for callback in self._on_profile_update_callbacks:
for callback in self._module_api_callbacks.on_profile_update_callbacks:
try:
await callback(user_id, new_profile, by_admin, deactivation)
except Exception as e:
@@ -520,7 +321,9 @@ class ThirdPartyEventRules:
deactivated: Whether the user is now deactivated.
by_admin: Whether the deactivation was performed by a server admin.
"""
for callback in self._on_user_deactivation_status_changed_callbacks:
for (
callback
) in self._module_api_callbacks.on_user_deactivation_status_changed_callbacks:
try:
await callback(user_id, deactivated, by_admin)
except Exception as e:
@@ -543,7 +346,7 @@ class ThirdPartyEventRules:
medium: the threepid's medium.
address: the threepid's address.
"""
for callback in self._on_threepid_bind_callbacks:
for callback in self._module_api_callbacks.on_threepid_bind_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
@@ -562,7 +365,9 @@ class ThirdPartyEventRules:
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for callback in self._on_add_user_third_party_identifier_callbacks:
for (
callback
) in self._module_api_callbacks.on_add_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
@@ -584,7 +389,9 @@ class ThirdPartyEventRules:
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for callback in self._on_remove_user_third_party_identifier_callbacks:
for (
callback
) in self._module_api_callbacks.on_remove_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:

@@ -355,7 +355,7 @@ def serialize_event(
time_now_ms = int(time_now_ms)

# Should this strip out None's?
d = dict(e.get_dict().items())
d = {k: v for k, v in e.get_dict().items()}

d["event_id"] = e.event_id

@@ -567,7 +567,7 @@ PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_and_fixup_power_levels_contents(
old_power_levels: PowerLevelsContent,
) -> Dict[str, Union[int, Dict[str, int]]]:
"""Copy the content of a power_levels event, unfreezing immutabledicts along the way.
"""Copy the content of a power_levels event, unfreezing frozendicts along the way.

We accept as input power level values which are strings, provided they represent an
integer, e.g. `"100"` instead of 100. Such strings are converted to integers

@@ -12,17 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from typing import Iterable, List, Type, Union, cast
from typing import Iterable, Type, Union, cast

import jsonschema
from pydantic import Field, StrictBool, StrictStr

from synapse.api.constants import (
MAX_ALIAS_LENGTH,
EventContentFields,
EventTypes,
Membership,
)
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions
from synapse.config.homeserver import HomeServerConfig
@@ -34,8 +28,6 @@ from synapse.events.utils import (
validate_canonicaljson,
)
from synapse.federation.federation_server import server_matches_acl_event
from synapse.http.servlet import validate_json_object
from synapse.rest.models import RequestBodyModel
from synapse.types import EventID, JsonDict, RoomID, UserID


@@ -96,27 +88,27 @@ class EventValidator:
Codes.INVALID_PARAM,
)

elif event.type == EventTypes.Retention:
if event.type == EventTypes.Retention:
self._validate_retention(event)

elif event.type == EventTypes.ServerACL:
if event.type == EventTypes.ServerACL:
if not server_matches_acl_event(config.server.server_name, event):
raise SynapseError(
400, "Can't create an ACL event that denies the local server"
)

elif event.type == EventTypes.PowerLevels:
if event.type == EventTypes.PowerLevels:
try:
jsonschema.validate(
instance=event.content,
schema=POWER_LEVELS_SCHEMA,
cls=POWER_LEVELS_VALIDATOR,
cls=plValidator,
)
except jsonschema.ValidationError as e:
if e.path:
# example: "users_default": '0' is not of type 'integer'
# cast safety: path entries can be integers, if we fail to validate
# items in an array. However, the POWER_LEVELS_SCHEMA doesn't expect
# items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
# to see any arrays.
message = (
'"' + cast(str, e.path[-1]) + '": ' + e.message  # noqa: B306
@@ -133,15 +125,6 @@ class EventValidator:
errcode=Codes.BAD_JSON,
)

# If the event contains a mentions key, validate it.
if (
EventContentFields.MSC3952_MENTIONS in event.content
and config.experimental.msc3952_intentional_mentions
):
validate_json_object(
event.content[EventContentFields.MSC3952_MENTIONS], Mentions
)

def _validate_retention(self, event: EventBase) -> None:
"""Checks that an event that defines the retention policy for a room respects the
format enforced by the spec.
@@ -270,17 +253,12 @@ POWER_LEVELS_SCHEMA = {
}


class Mentions(RequestBodyModel):
user_ids: List[StrictStr] = Field(default_factory=list)
room: StrictBool = False


# This could return something newer than Draft 7, but that's the current "latest"
# validator.
def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
validator = jsonschema.validators.validator_for(schema)
def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)

# by default jsonschema does not consider a immutabledict to be an object so
# by default jsonschema does not consider a frozendict to be an object so
# we need to use a custom type checker
# https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types
type_checker = validator.TYPE_CHECKER.redefine(
@@ -290,4 +268,4 @@ def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
return jsonschema.validators.extend(validator, type_checker=type_checker)


POWER_LEVELS_VALIDATOR = _create_validator(POWER_LEVELS_SCHEMA)
plValidator = _create_power_level_validator()

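The comment above about jsonschema needing a custom type checker is the key detail: mapping types that are not plain `dict` (frozendict/immutabledict) fail the default `object` type check. A standalone sketch of that pattern, written as my own illustration rather than code from this diff, looks like this:

```python
# Illustrative sketch: extend a jsonschema validator so any Mapping counts as an "object".
import collections.abc

import jsonschema

SCHEMA = {"type": "object", "properties": {"ban": {"type": "integer"}}}

base = jsonschema.validators.validator_for(SCHEMA)
type_checker = base.TYPE_CHECKER.redefine(
    "object",
    lambda checker, instance: isinstance(instance, collections.abc.Mapping),
)
MappingFriendlyValidator = jsonschema.validators.extend(base, type_checker=type_checker)

# A frozendict-like Mapping would now validate instead of failing the type check.
jsonschema.validate(instance={"ban": 50}, schema=SCHEMA, cls=MappingFriendlyValidator)
```
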
@@ -61,7 +61,6 @@ from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.client import is_unknown_endpoint
from synapse.http.types import QueryParams
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
@@ -760,6 +759,43 @@ class FederationClient(FederationBase):

return signed_auth

def _is_unknown_endpoint(
self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
) -> bool:
"""
Returns true if the response was due to an endpoint being unimplemented.

Args:
e: The error response received from the remote server.
synapse_error: The above error converted to a SynapseError. This is
automatically generated if not provided.

"""
if synapse_error is None:
synapse_error = e.to_synapse_error()
# MSC3743 specifies that servers should return a 404 or 405 with an errcode
# of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
# to an unknown method, respectively.
#
# Older versions of servers don't properly handle this. This needs to be
# rather specific as some endpoints truly do return 404 errors.
return (
# 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
(e.code == 404 or e.code == 405)
and (
# Older Dendrites returned a text or empty body.
# Older Conduit returned an empty body.
not e.response
or e.response == b"404 page not found"
# The proper response JSON with M_UNRECOGNIZED errcode.
or synapse_error.errcode == Codes.UNRECOGNIZED
)
) or (
# Older Synapses returned a 400 error.
e.code == 400
and synapse_error.errcode == Codes.UNRECOGNIZED
)

async def _try_destination_list(
self,
description: str,
@@ -851,7 +887,7 @@ class FederationClient(FederationBase):
elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes:
failover = True

elif failover_on_unknown_endpoint and is_unknown_endpoint(
elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
e, synapse_error
):
failover = True
@@ -1187,7 +1223,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise

logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
@@ -1261,7 +1297,7 @@ class FederationClient(FederationBase):
# fallback to the v1 endpoint if the room uses old-style event IDs.
# Otherwise, consider it a legitimate error and raise.
err = e.to_synapse_error()
if is_unknown_endpoint(e, err):
if self._is_unknown_endpoint(e, err):
if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
raise SynapseError(
400,
@@ -1322,7 +1358,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise

logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
@@ -1593,7 +1629,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the unstable endpoint. Otherwise, consider it a
# legitimate error and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise

logger.debug(

@@ -86,7 +86,7 @@ from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_server_name
@@ -135,7 +135,6 @@ class FederationServer(FederationBase):
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
self._e2e_keys_handler = hs.get_e2e_keys_handler()

self._state_storage_controller = hs.get_storage_controllers().state

@@ -1013,14 +1012,15 @@ class FederationServer(FederationBase):
query.append((user_id, device_id, algorithm))

log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = await self._e2e_keys_handler.claim_local_one_time_keys(query)
results = await self.store.claim_e2e_one_time_keys(query)

json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for result in results:
for user_id, device_keys in result.items():
for device_id, keys in device_keys.items():
for key_id, key in keys.items():
json_result.setdefault(user_id, {})[device_id] = {key_id: key}
json_result: Dict[str, Dict[str, dict]] = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_str in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json_decoder.decode(json_str)
}

logger.info(
"Claimed one-time-keys: %s",

@@ -244,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):

self.notifier.on_new_replication_data()

def send_device_messages(self, destination: str, immediate: bool = True) -> None:
def send_device_messages(self, destination: str, immediate: bool = False) -> None:
"""As per FederationSender"""
# We don't need to replicate this as it gets sent down a different
# stream.

@@ -11,119 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Federation Sender is responsible for sending Persistent Data Units (PDUs)
and Ephemeral Data Units (EDUs) to other homeservers using
the `/send` Federation API.


## How do PDUs get sent?

The Federation Sender is made aware of new PDUs due to `FederationSender.notify_new_events`.
When the sender is notified about a newly-persisted PDU that originates from this homeserver
and is not an out-of-band event, we pass the PDU to the `_PerDestinationQueue` for each
remote homeserver that is in the room at that point in the DAG.


### Per-Destination Queues

There is one `PerDestinationQueue` per 'destination' homeserver.
The `PerDestinationQueue` maintains the following information about the destination:

- whether the destination is currently in [catch-up mode (see below)](#catch-up-mode);
- a queue of PDUs to be sent to the destination; and
- a queue of EDUs to be sent to the destination (not considered in this section).

Upon a new PDU being enqueued, `attempt_new_transaction` is called to start a new
transaction if there is not already one in progress.


### Transactions and the Transaction Transmission Loop

Each federation HTTP request to the `/send` endpoint is referred to as a 'transaction'.
The body of the HTTP request contains a list of PDUs and EDUs to send to the destination.

The *Transaction Transmission Loop* (`_transaction_transmission_loop`) is responsible
for emptying the queued PDUs (and EDUs) from a `PerDestinationQueue` by sending
them to the destination.

There can only be one transaction in flight for a given destination at any time.
(Other than preventing us from overloading the destination, this also makes it easier to
reason about because we process events sequentially for each destination.
This is useful for *Catch-Up Mode*, described later.)

The loop continues so long as there is anything to send. At each iteration of the loop, we:

- dequeue up to 50 PDUs (and up to 100 EDUs).
- make the `/send` request to the destination homeserver with the dequeued PDUs and EDUs.
- if successful, make note of the fact that we succeeded in transmitting PDUs up to
  the given `stream_ordering` of the latest PDU by updating `last_successful_stream_ordering`.
- if unsuccessful, back off from the remote homeserver for some time.
  If we have been unsuccessful for too long (when the backoff interval grows to exceed 1 hour),
  the in-memory queues are emptied and we enter [*Catch-Up Mode*, described below](#catch-up-mode).


### Catch-Up Mode

When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
(Only once the catch-up mode has been exited can the regular transaction transmission behaviour
be resumed.)

*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
connection problems, is responsible for sending PDUs that have been missed by the destination
homeserver. (PDUs can be missed because the `PerDestinationQueue` is volatile — i.e. resets
on startup — and it does not hold PDUs forever if `/send` requests to the destination fail.)

The catch-up mechanism makes use of the `last_successful_stream_ordering` column in the
`destinations` table (which gives the `stream_ordering` of the most recent successfully
sent PDU) and the `stream_ordering` column in the `destination_rooms` table (which gives,
for each room, the `stream_ordering` of the most recent PDU that needs to be sent to this
destination).

Each iteration of the loop pulls out 50 `destination_rooms` entries with the oldest
`stream_ordering`s that are greater than the `last_successful_stream_ordering`.
In other words, from the set of latest PDUs in each room to be sent to the destination,
the 50 oldest such PDUs are pulled out.

These PDUs could, in principle, now be directly sent to the destination. However, as an
optimisation intended to prevent overloading destination homeservers, we instead attempt
to send the latest forward extremities so long as the destination homeserver is still
eligible to receive those.
This reduces load on the destination **in aggregate** because all Synapse homeservers
will behave according to this principle and therefore avoid sending lots of different PDUs
at different points in the DAG to a recovering homeserver.
*This optimisation is not currently valid in rooms which are partial-state on this homeserver,
since we are unable to determine whether the destination homeserver is eligible to receive
the latest forward extremities unless this homeserver sent those PDUs — in this case, we
just send the latest PDUs originating from this server and skip this optimisation.*

Whilst PDUs are sent through this mechanism, the position of `last_successful_stream_ordering`
is advanced as normal.
Once there are no longer any rooms containing outstanding PDUs to be sent to the destination
*that are not already in the `PerDestinationQueue` because they arrived since Catch-Up Mode
was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmission_loop`.


#### A note on failures and back-offs

If a remote server is unreachable over federation, we back off from that server,
with an exponentially-increasing retry interval.
Whilst we don't automatically retry after the interval, we prevent making new attempts
until such time as the back-off has cleared.
Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission
loop resumes and empties the queue by making federation requests.

If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
unbounded growth) and Catch-Up Mode is entered.

It is worth noting that the back-off for a remote server is cleared once an inbound
request from that remote server is received (see `notify_remote_server_up`).
At this point, the transaction transmission loop is also started up, to proactively
send missed PDUs and EDUs to the destination (i.e. you don't need to wait for a new PDU
or EDU, destined for that destination, to be created in order to send out missed PDUs and
EDUs).
"""

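To make the catch-up selection described in the docstring above concrete, here is a rough, self-contained sketch (my own illustration, operating on a hypothetical in-memory mapping rather than Synapse's storage layer) of pulling the 50 oldest outstanding rooms above `last_successful_stream_ordering`:

```python
# Hypothetical sketch of the catch-up room selection described above.
from typing import Dict, List, Tuple


def pick_catchup_rooms(
    destination_rooms: Dict[str, int],  # room_id -> newest pending stream_ordering for this destination
    last_successful_stream_ordering: int,
    limit: int = 50,
) -> List[Tuple[str, int]]:
    """Return up to `limit` (room_id, stream_ordering) pairs, oldest first,
    restricted to rooms whose pending PDU is newer than what was already sent."""
    outstanding = [
        (room_id, so)
        for room_id, so in destination_rooms.items()
        if so > last_successful_stream_ordering
    ]
    outstanding.sort(key=lambda pair: pair[1])
    return outstanding[:limit]


# Example: only rooms above stream ordering 100 are considered, oldest first.
print(pick_catchup_rooms({"!a:x": 90, "!b:x": 120, "!c:x": 105}, last_successful_stream_ordering=100))
```
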
import abc
import logging
@@ -896,7 +783,7 @@ class FederationSender(AbstractFederationSender):
else:
queue.send_edu(edu)

def send_device_messages(self, destination: str, immediate: bool = True) -> None:
def send_device_messages(self, destination: str, immediate: bool = False) -> None:
if destination == self.server_name:
logger.warning("Not sending device update to ourselves")
return

@@ -497,8 +497,8 @@ class PerDestinationQueue:
#
# Note: `catchup_pdus` will have exactly one PDU per room.
for pdu in catchup_pdus:
# The PDU from the DB will be the newest PDU in the room from
# *this server* that we tried---but were unable---to send to the remote.
# The PDU from the DB will be the last PDU in the room from
# *this server* that wasn't sent to the remote. However, other
# servers may have sent lots of events since then, and we want
# to try and tell the remote only about the *latest* events in
# the room. This is so that it doesn't get inundated by events
@@ -516,11 +516,6 @@ class PerDestinationQueue:
# If the event is in the extremities, then great! We can just
# use that without having to do further checks.
room_catchup_pdus = [pdu]
elif await self._store.is_partial_state_room(pdu.room_id):
# We can't be sure which events the destination should
# see using only partial state. Avoid doing so, and just retry
# sending out the newest PDU the remote is missing from us.
room_catchup_pdus = [pdu]
else:
# If not, fetch the extremities and figure out which we can
# send.
@@ -552,8 +547,6 @@ class PerDestinationQueue:
self._server_name,
new_pdus,
redact=False,
filter_out_erased_senders=True,
filter_out_remote_partial_state_events=True,
)

# If we've filtered out all the extremities, fall back to

@@ -108,7 +108,6 @@ class PublicRoomList(BaseFederationServlet):
"""

PATH = "/publicRooms"
CATEGORY = "Federation requests"

def __init__(
self,
@@ -213,7 +212,6 @@ class OpenIdUserInfo(BaseFederationServlet):
"""

PATH = "/openid/userinfo"
CATEGORY = "Federation requests"

REQUIRE_AUTH = False


@@ -70,7 +70,6 @@ class BaseFederationServerServlet(BaseFederationServlet):

class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
CATEGORY = "Inbound federation transaction request"

# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
@@ -139,7 +138,6 @@ class FederationSendServlet(BaseFederationServerServlet):

class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
CATEGORY = "Federation requests"

# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
@@ -154,7 +152,6 @@ class FederationEventServlet(BaseFederationServerServlet):

class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"

# This is when someone asks for all data for a given room.
async def on_GET(
@@ -173,7 +170,6 @@ class FederationStateV1Servlet(BaseFederationServerServlet):

class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -191,7 +187,6 @@ class FederationStateIdsServlet(BaseFederationServerServlet):

class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -230,7 +225,6 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
"""

PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -252,7 +246,6 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):

class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
CATEGORY = "Federation requests"

# This is when we receive a server-server Query
async def on_GET(
@@ -269,7 +262,6 @@ class FederationQueryServlet(BaseFederationServerServlet):

class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -305,7 +297,6 @@ class FederationMakeJoinServlet(BaseFederationServerServlet):

class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -321,7 +312,6 @@ class FederationMakeLeaveServlet(BaseFederationServerServlet):

class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_PUT(
self,
@@ -337,7 +327,6 @@ class FederationV1SendLeaveServlet(BaseFederationServerServlet):

class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

PREFIX = FEDERATION_V2_PREFIX

@@ -355,7 +344,6 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet):

class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -378,7 +366,6 @@ class FederationMakeKnockServlet(BaseFederationServerServlet):

class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_PUT(
self,
@@ -394,7 +381,6 @@ class FederationV1SendKnockServlet(BaseFederationServerServlet):

class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -409,7 +395,6 @@ class FederationEventAuthServlet(BaseFederationServerServlet):

class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_PUT(
self,
@@ -427,7 +412,6 @@ class FederationV1SendJoinServlet(BaseFederationServerServlet):

class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

PREFIX = FEDERATION_V2_PREFIX

@@ -471,7 +455,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):

class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_PUT(
self,
@@ -496,7 +479,6 @@ class FederationV1InviteServlet(BaseFederationServerServlet):

class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"

PREFIX = FEDERATION_V2_PREFIX

@@ -533,7 +515,6 @@ class FederationV2InviteServlet(BaseFederationServerServlet):

class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_PUT(
self,
@@ -548,7 +529,6 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):

class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
CATEGORY = "Federation requests"

async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
@@ -558,7 +538,6 @@ class FederationClientKeysQueryServlet(BaseFederationServerServlet):

class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_GET(
self,
@@ -572,7 +551,6 @@ class FederationUserDevicesQueryServlet(BaseFederationServerServlet):

class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
CATEGORY = "Federation requests"

async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
@@ -583,7 +561,6 @@ class FederationClientKeysClaimServlet(BaseFederationServerServlet):

class FederationGetMissingEventsServlet(BaseFederationServerServlet):
PATH = "/get_missing_events/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"

async def on_POST(
self,
@@ -609,7 +586,6 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):

class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
CATEGORY = "Federation requests"

REQUIRE_AUTH = False

@@ -642,7 +618,6 @@ class On3pidBindServlet(BaseFederationServerServlet):

class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
CATEGORY = "Federation requests"

REQUIRE_AUTH = False

@@ -665,7 +640,6 @@ class FederationVersionServlet(BaseFederationServlet):

class FederationRoomHierarchyServlet(BaseFederationServlet):
PATH = "/hierarchy/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"

def __init__(
self,
@@ -698,7 +672,6 @@ class RoomComplexityServlet(BaseFederationServlet):

PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
CATEGORY = "Federation requests (unstable)"

def __init__(
self,

@@ -14,7 +14,7 @@
# limitations under the License.
import logging
import random
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from typing import TYPE_CHECKING, List, Optional, Tuple

from synapse.api.constants import AccountDataTypes
from synapse.replication.http.account_data import (
@@ -33,10 +33,6 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

ON_ACCOUNT_DATA_UPDATED_CALLBACK = Callable[
[str, Optional[str], str, JsonDict], Awaitable
]


class AccountDataHandler:
def __init__(self, hs: "HomeServer"):
@@ -60,16 +56,7 @@ class AccountDataHandler:
self._remove_tag_client = ReplicationRemoveTagRestServlet.make_client(hs)
self._account_data_writers = hs.config.worker.writers.account_data

self._on_account_data_updated_callbacks: List[
ON_ACCOUNT_DATA_UPDATED_CALLBACK
] = []

def register_module_callbacks(
self, on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None
) -> None:
"""Register callbacks from modules."""
if on_account_data_updated is not None:
self._on_account_data_updated_callbacks.append(on_account_data_updated)
self._module_api_callbacks = hs.get_module_api_callbacks().account_data

async def _notify_modules(
self,
@@ -92,7 +79,7 @@ class AccountDataHandler:
account_data_type: The type of the account data.
content: The content that is now associated with this type.
"""
for callback in self._on_account_data_updated_callbacks:
for callback in self._module_api_callbacks.on_account_data_updated_callbacks:
try:
await callback(user_id, room_id, account_data_type, content)
except Exception as e:

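As with the other handlers in this comparison, the account-data hook is supplied by modules rather than constructed in the handler. An illustrative sketch (not taken from this diff) of a module consuming `on_account_data_updated`, assuming the `ModuleApi.register_account_data_callbacks` API:

```python
# Hypothetical module logging account-data updates via the hook shown above.
import logging
from typing import Optional

from synapse.module_api import JsonDict, ModuleApi

logger = logging.getLogger(__name__)


class AccountDataLogger:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_account_data_callbacks(on_account_data_updated=self.on_account_data_updated)

    async def on_account_data_updated(
        self, user_id: str, room_id: Optional[str], account_data_type: str, content: JsonDict
    ) -> None:
        # Mirrors the (user_id, room_id, type, content) signature used by the handler above.
        logger.info("Account data %s updated for %s in %s", account_data_type, user_id, room_id)
```
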
@@ -12,17 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Collection,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union
|
||||
|
||||
from prometheus_client import Counter
|
||||
|
||||
@@ -839,126 +829,3 @@ class ApplicationServicesHandler:
|
||||
if unknown_user:
|
||||
return await self.query_user_exists(user_id)
|
||||
return True
|
||||
|
||||
async def claim_e2e_one_time_keys(
|
||||
self, query: Iterable[Tuple[str, str, str]]
|
||||
) -> Tuple[
|
||||
Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]], List[Tuple[str, str, str]]
|
||||
]:
|
||||
"""Claim one time keys from application services.
|
||||
|
||||
Users which are exclusively owned by an application service are sent a
|
||||
key claim request to check if the application service provides keys
|
||||
directly.
|
||||
|
||||
Args:
|
||||
query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes.
|
||||
|
||||
A copy of the input which has not been fulfilled (either because
|
||||
they are not appservice users or the appservice does not support
|
||||
providing OTKs).
|
||||
"""
|
||||
services = self.store.get_app_services()
|
||||
|
||||
# Partition the users by appservice.
|
||||
query_by_appservice: Dict[str, List[Tuple[str, str, str]]] = {}
|
||||
missing = []
|
||||
for user_id, device, algorithm in query:
|
||||
if not self.store.get_if_app_services_interested_in_user(user_id):
|
||||
missing.append((user_id, device, algorithm))
|
||||
continue
|
||||
|
||||
# Find the associated appservice.
|
||||
for service in services:
|
||||
if service.is_exclusive_user(user_id):
|
||||
query_by_appservice.setdefault(service.id, []).append(
|
||||
(user_id, device, algorithm)
|
||||
)
|
||||
continue
|
||||
|
||||
# Query each service in parallel.
|
||||
results = await make_deferred_yieldable(
|
||||
defer.DeferredList(
|
||||
[
|
||||
run_in_background(
|
||||
self.appservice_api.claim_client_keys,
|
||||
# We know this must be an app service.
|
||||
self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
|
||||
service_query,
|
||||
)
|
||||
for service_id, service_query in query_by_appservice.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
)
|
||||
)
|
||||
|
||||
# Patch together the results -- they are all independent (since they
|
||||
# require exclusive control over the users). They get returned as a list
|
||||
# and the caller combines them.
|
||||
claimed_keys: List[Dict[str, Dict[str, Dict[str, JsonDict]]]] = []
|
||||
for success, result in results:
|
||||
if success:
|
||||
claimed_keys.append(result[0])
|
||||
missing.extend(result[1])
|
||||
|
||||
return claimed_keys, missing

    async def query_keys(
        self, query: Mapping[str, Optional[List[str]]]
    ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
        """Query application services for device keys.

        Users which are exclusively owned by an application service are queried
        for keys to check if the application service provides keys directly.

        Args:
            query: map from user_id to a list of devices to query

        Returns:
            A map from user_id -> device_id -> device details
        """
        services = self.store.get_app_services()

        # Partition the users by appservice.
        query_by_appservice: Dict[str, Dict[str, List[str]]] = {}
        for user_id, device_ids in query.items():
            if not self.store.get_if_app_services_interested_in_user(user_id):
                continue

            # Find the associated appservice.
            for service in services:
                if service.is_exclusive_user(user_id):
                    query_by_appservice.setdefault(service.id, {})[user_id] = (
                        device_ids or []
                    )
                    continue

        # Query each service in parallel.
        results = await make_deferred_yieldable(
            defer.DeferredList(
                [
                    run_in_background(
                        self.appservice_api.query_keys,
                        # We know this must be an app service.
                        self.store.get_app_service_by_id(service_id),  # type: ignore[arg-type]
                        service_query,
                    )
                    for service_id, service_query in query_by_appservice.items()
                ],
                consumeErrors=True,
            )
        )

        # Patch together the results -- they are all independent (since they
        # require exclusive control over the users). They get returned as a single
        # dictionary.
        key_queries: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
        for success, result in results:
            if success:
                key_queries.update(result)

        return key_queries

@@ -65,6 +65,10 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api.callbacks.password_auth_provider_callbacks import (
    CHECK_3PID_AUTH_CALLBACK,
    ON_LOGGED_OUT_CALLBACK,
)
from synapse.storage.databases.main.registration import (
    LoginTokenExpired,
    LoginTokenLookupResult,
@@ -1096,7 +1100,7 @@ class AuthHandler:
        return self._password_enabled_for_login and self._password_localdb_enabled

    def get_supported_login_types(self) -> Iterable[str]:
        """Get a the login types supported for the /login API
        """Get the login types supported for the /login API

        By default this is just 'm.login.password' (unless password_enabled is
        False in the config file), but password auth providers can provide
@@ -1504,10 +1508,8 @@ class AuthHandler:
        )

        # delete pushers associated with this access token
        # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
        # background update completes.
        if token.token_id is not None:
            await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
        await self.hs.get_pusherpool().remove_pushers_by_access_token(
            token.user_id, (token.token_id,)
        )

@@ -1537,9 +1539,7 @@ class AuthHandler:
        )

        # delete pushers associated with the access tokens
        # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
        # background update completes.
        await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
        await self.hs.get_pusherpool().remove_pushers_by_access_token(
            user_id, (token_id for _, token_id, _ in tokens_and_devices)
        )

@@ -2003,124 +2003,16 @@ def load_single_legacy_password_auth_provider(
    )


CHECK_3PID_AUTH_CALLBACK = Callable[
    [str, str, str],
    Awaitable[
        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
    ],
]
ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable]
CHECK_AUTH_CALLBACK = Callable[
    [str, str, JsonDict],
    Awaitable[
        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
    ],
]
GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[
    [JsonDict, JsonDict],
    Awaitable[Optional[str]],
]
GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK = Callable[
    [JsonDict, JsonDict],
    Awaitable[Optional[str]],
]
IS_3PID_ALLOWED_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]


class PasswordAuthProvider:
    """
    A class that the AuthHandler calls when authenticating users
    It allows modules to provide alternative methods for authentication
    """

    def __init__(self) -> None:
        # lists of callbacks
        self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = []
        self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = []
        self.get_username_for_registration_callbacks: List[
            GET_USERNAME_FOR_REGISTRATION_CALLBACK
        ] = []
        self.get_displayname_for_registration_callbacks: List[
            GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK
        ] = []
        self.is_3pid_allowed_callbacks: List[IS_3PID_ALLOWED_CALLBACK] = []

        # Mapping from login type to login parameters
        self._supported_login_types: Dict[str, Tuple[str, ...]] = {}

        # Mapping from login type to auth checker callbacks
        self.auth_checker_callbacks: Dict[str, List[CHECK_AUTH_CALLBACK]] = {}

    def register_password_auth_provider_callbacks(
        self,
        check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None,
        on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None,
        is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None,
        auth_checkers: Optional[
            Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
        ] = None,
        get_username_for_registration: Optional[
            GET_USERNAME_FOR_REGISTRATION_CALLBACK
        ] = None,
        get_displayname_for_registration: Optional[
            GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK
        ] = None,
    ) -> None:
        # Register check_3pid_auth callback
        if check_3pid_auth is not None:
            self.check_3pid_auth_callbacks.append(check_3pid_auth)

        # register on_logged_out callback
        if on_logged_out is not None:
            self.on_logged_out_callbacks.append(on_logged_out)

        if auth_checkers is not None:
            # register a new supported login_type
            # Iterate through all of the types being registered
            for (login_type, fields), callback in auth_checkers.items():
                # Note: fields may be empty here. This would allow a modules auth checker to
                # be called with just 'login_type' and no password or other secrets

                # Need to check that all the field names are strings or may get nasty errors later
                for f in fields:
                    if not isinstance(f, str):
                        raise RuntimeError(
                            "A module tried to register support for login type: %s with parameters %s"
                            " but all parameter names must be strings"
                            % (login_type, fields)
                        )

                # 2 modules supporting the same login type must expect the same fields
                # e.g. 1 can't expect "pass" if the other expects "password"
                # so throw an exception if that happens
                if login_type not in self._supported_login_types.get(login_type, []):
                    self._supported_login_types[login_type] = fields
                else:
                    fields_currently_supported = self._supported_login_types.get(
                        login_type
                    )
                    if fields_currently_supported != fields:
                        raise RuntimeError(
                            "A module tried to register support for login type: %s with parameters %s"
                            " but another module had already registered support for that type with parameters %s"
                            % (login_type, fields, fields_currently_supported)
                        )

                # Add the new method to the list of auth_checker_callbacks for this login type
                self.auth_checker_callbacks.setdefault(login_type, []).append(callback)

        if get_username_for_registration is not None:
            self.get_username_for_registration_callbacks.append(
                get_username_for_registration,
            )

        if get_displayname_for_registration is not None:
            self.get_displayname_for_registration_callbacks.append(
                get_displayname_for_registration,
            )

        if is_3pid_allowed is not None:
            self.is_3pid_allowed_callbacks.append(is_3pid_allowed)
    def __init__(self, hs: "HomeServer") -> None:
        self._module_api_callbacks = (
            hs.get_module_api_callbacks().password_auth_provider
        )

    def get_supported_login_types(self) -> Mapping[str, Iterable[str]]:
        """Get the login types supported by this password provider
@@ -2130,7 +2022,7 @@ class PasswordAuthProvider:
        to the /login API.
        """

        return self._supported_login_types
        return self._module_api_callbacks.supported_login_types

    async def check_auth(
        self, username: str, login_type: str, login_dict: JsonDict
@@ -2153,7 +2045,7 @@ class PasswordAuthProvider:

        # Go through all callbacks for the login type until one returns with a value
        # other than None (i.e. until a callback returns a success)
        for callback in self.auth_checker_callbacks[login_type]:
        for callback in self._module_api_callbacks.auth_checker_callbacks[login_type]:
            try:
                result = await delay_cancellation(
                    callback(username, login_type, login_dict)
@@ -2218,7 +2110,7 @@ class PasswordAuthProvider:
        # (user_id, callback_func), where callback_func should be run
        # after we've finished everything else

        for callback in self.check_3pid_auth_callbacks:
        for callback in self._module_api_callbacks.check_3pid_auth_callbacks:
            try:
                result = await delay_cancellation(callback(medium, address, password))
            except CancelledError:
@@ -2276,7 +2168,7 @@ class PasswordAuthProvider:
        self, user_id: str, device_id: Optional[str], access_token: str
    ) -> None:
        # call all of the on_logged_out callbacks
        for callback in self.on_logged_out_callbacks:
        for callback in self._module_api_callbacks.on_logged_out_callbacks:
            try:
                await callback(user_id, device_id, access_token)
            except Exception as e:
@@ -2301,7 +2193,9 @@ class PasswordAuthProvider:
            The localpart to use when registering this user, or None if no module
            returned a localpart.
        """
        for callback in self.get_username_for_registration_callbacks:
        for (
            callback
        ) in self._module_api_callbacks.get_username_for_registration_callbacks:
            try:
                res = await delay_cancellation(callback(uia_results, params))

@@ -2346,7 +2240,9 @@ class PasswordAuthProvider:
            A tuple which first element is the display name, and the second is an MXC URL
            to the user's avatar.
        """
        for callback in self.get_displayname_for_registration_callbacks:
        for (
            callback
        ) in self._module_api_callbacks.get_displayname_for_registration_callbacks:
            try:
                res = await delay_cancellation(callback(uia_results, params))

@@ -2389,7 +2285,7 @@ class PasswordAuthProvider:
        Returns:
            Whether the 3PID is allowed to be bound on this homeserver
        """
        for callback in self.is_3pid_allowed_callbacks:
        for callback in self._module_api_callbacks.is_3pid_allowed_callbacks:
            try:
                res = await delay_cancellation(callback(medium, address, registration))

@@ -176,9 +176,6 @@ class DeactivateAccountHandler:
        # Remove account data (including ignored users and push rules).
        await self.store.purge_account_data_for_user(user_id)

        # Delete any server-side backup keys
        await self.store.bulk_delete_backup_keys_and_versions_for_user(user_id)

        # Let modules know the user has been deactivated.
        await self._third_party_rules.on_user_deactivation_status_changed(
            user_id,

@@ -503,8 +503,6 @@ class DeviceHandler(DeviceWorkerHandler):
            else:
                raise

        await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)

        # Delete data specific to each device. Not optimised as it is not
        # considered as part of a critical path.
        for device_id in device_ids:
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
|
||||
|
||||
@@ -52,7 +53,6 @@ class E2eKeysHandler:
|
||||
self.store = hs.get_datastores().main
|
||||
self.federation = hs.get_federation_client()
|
||||
self.device_handler = hs.get_device_handler()
|
||||
self._appservice_handler = hs.get_application_service_handler()
|
||||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
@@ -88,13 +88,6 @@ class E2eKeysHandler:
|
||||
max_count=10,
|
||||
)
|
||||
|
||||
self._query_appservices_for_otks = (
|
||||
hs.config.experimental.msc3983_appservice_otk_claims
|
||||
)
|
||||
self._query_appservices_for_keys = (
|
||||
hs.config.experimental.msc3984_appservice_key_query
|
||||
)
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_devices(
|
||||
@@ -500,19 +493,6 @@ class E2eKeysHandler:
|
||||
local_query, include_displaynames
|
||||
)
|
||||
|
||||
# Check if the application services have any additional results.
|
||||
if self._query_appservices_for_keys:
|
||||
# Query the appservices for any keys.
|
||||
appservice_results = await self._appservice_handler.query_keys(query)
|
||||
|
||||
# Merge results, overriding with what the appservice returned.
|
||||
for user_id, devices in appservice_results.get("device_keys", {}).items():
|
||||
# Copy the appservice device info over the homeserver device info, but
|
||||
# don't completely overwrite it.
|
||||
results.setdefault(user_id, {}).update(devices)
|
||||
|
||||
# TODO Handle cross-signing keys.
|
||||
|
||||
# Build the result structure
|
||||
for user_id, device_keys in results.items():
|
||||
for device_id, device_info in device_keys.items():
|
||||
@@ -562,42 +542,6 @@ class E2eKeysHandler:
|
||||
|
||||
return ret
|
||||
|
||||
async def claim_local_one_time_keys(
|
||||
self, local_query: List[Tuple[str, str, str]]
|
||||
) -> Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]]:
|
||||
"""Claim one time keys for local users.
|
||||
|
||||
1. Attempt to claim OTKs from the database.
|
||||
2. Ask application services if they provide OTKs.
|
||||
3. Attempt to fetch fallback keys from the database.
|
||||
|
||||
Args:
|
||||
local_query: An iterable of tuples of (user ID, device ID, algorithm).
|
||||
|
||||
Returns:
|
||||
An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes.
|
||||
"""
|
||||
|
||||
otk_results, not_found = await self.store.claim_e2e_one_time_keys(local_query)
|
||||
|
||||
# If the application services have not provided any keys via the C-S
|
||||
# API, query it directly for one-time keys.
|
||||
if self._query_appservices_for_otks:
|
||||
(
|
||||
appservice_results,
|
||||
not_found,
|
||||
) = await self._appservice_handler.claim_e2e_one_time_keys(not_found)
|
||||
else:
|
||||
appservice_results = []
|
||||
|
||||
# For each user that does not have a one-time keys available, see if
|
||||
# there is a fallback key.
|
||||
fallback_results = await self.store.claim_e2e_fallback_keys(not_found)
|
||||
|
||||
# Return the results in order, each item from the input query should
|
||||
# only appear once in the combined list.
|
||||
return (otk_results, *appservice_results, fallback_results)
|
||||
|
||||
@trace
|
||||
async def claim_one_time_keys(
|
||||
self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int]
|
||||
@@ -617,18 +561,17 @@ class E2eKeysHandler:
|
||||
set_tag("local_key_query", str(local_query))
|
||||
set_tag("remote_key_query", str(remote_queries))
|
||||
|
||||
results = await self.claim_local_one_time_keys(local_query)
|
||||
results = await self.store.claim_e2e_one_time_keys(local_query)
|
||||
|
||||
# A map of user ID -> device ID -> key ID -> key.
|
||||
json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
|
||||
for result in results:
|
||||
for user_id, device_keys in result.items():
|
||||
for device_id, keys in device_keys.items():
|
||||
for key_id, key in keys.items():
|
||||
json_result.setdefault(user_id, {})[device_id] = {key_id: key}
|
||||
|
||||
# Remote failures.
|
||||
failures: Dict[str, JsonDict] = {}
|
||||
for user_id, device_keys in results.items():
|
||||
for device_id, keys in device_keys.items():
|
||||
for key_id, json_str in keys.items():
|
||||
json_result.setdefault(user_id, {})[device_id] = {
|
||||
key_id: json_decoder.decode(json_str)
|
||||
}
|
||||
|
||||
@trace
|
||||
async def claim_client_keys(destination: str) -> None:
|
||||
|
||||
@@ -159,16 +159,15 @@ class EventHandler:
|
||||
Returns:
|
||||
An event, or None if there is no event matching this ID.
|
||||
Raises:
|
||||
AuthError: if the user does not have the rights to inspect this event.
|
||||
SynapseError if there was a problem retrieving this event, or
|
||||
AuthError if the user does not have the rights to inspect this
|
||||
event.
|
||||
"""
|
||||
redact_behaviour = (
|
||||
EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact
|
||||
)
|
||||
event = await self.store.get_event(
|
||||
event_id,
|
||||
check_room_id=room_id,
|
||||
redact_behaviour=redact_behaviour,
|
||||
allow_none=True,
|
||||
event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
|
||||
)
|
||||
|
||||
if not event:
|
||||
|
||||
@@ -392,7 +392,7 @@ class FederationHandler:
|
||||
get_prev_content=False,
|
||||
)
|
||||
|
||||
# We unset `filter_out_erased_senders` as we might otherwise get false
|
||||
# We set `check_history_visibility_only` as we might otherwise get false
|
||||
# positives from users having been erased.
|
||||
filtered_extremities = await filter_events_for_server(
|
||||
self._storage_controllers,
|
||||
@@ -400,8 +400,7 @@ class FederationHandler:
|
||||
self.server_name,
|
||||
events_to_check,
|
||||
redact=False,
|
||||
filter_out_erased_senders=False,
|
||||
filter_out_remote_partial_state_events=False,
|
||||
check_history_visibility_only=True,
|
||||
)
|
||||
if filtered_extremities:
|
||||
extremities_to_request.append(bp.event_id)
|
||||
@@ -1332,13 +1331,7 @@ class FederationHandler:
|
||||
)
|
||||
|
||||
events = await filter_events_for_server(
|
||||
self._storage_controllers,
|
||||
origin,
|
||||
self.server_name,
|
||||
events,
|
||||
redact=True,
|
||||
filter_out_erased_senders=True,
|
||||
filter_out_remote_partial_state_events=True,
|
||||
self._storage_controllers, origin, self.server_name, events
|
||||
)
|
||||
|
||||
return events
|
||||
@@ -1369,13 +1362,7 @@ class FederationHandler:
|
||||
await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
|
||||
|
||||
events = await filter_events_for_server(
|
||||
self._storage_controllers,
|
||||
origin,
|
||||
self.server_name,
|
||||
[event],
|
||||
redact=True,
|
||||
filter_out_erased_senders=True,
|
||||
filter_out_remote_partial_state_events=True,
|
||||
self._storage_controllers, origin, self.server_name, [event]
|
||||
)
|
||||
event = events[0]
|
||||
return event
|
||||
@@ -1403,13 +1390,7 @@ class FederationHandler:
|
||||
)
|
||||
|
||||
missing_events = await filter_events_for_server(
|
||||
self._storage_controllers,
|
||||
origin,
|
||||
self.server_name,
|
||||
missing_events,
|
||||
redact=True,
|
||||
filter_out_erased_senders=True,
|
||||
filter_out_remote_partial_state_events=True,
|
||||
self._storage_controllers, origin, self.server_name, missing_events
|
||||
)
|
||||
|
||||
return missing_events
|
||||
@@ -1949,25 +1930,27 @@ class FederationHandler:
|
||||
)
|
||||
for event in events:
|
||||
for attempt in itertools.count():
|
||||
# We try a new destination on every iteration.
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
await self._federation_event_handler.update_state_for_partial_state_event(
|
||||
destination, event
|
||||
)
|
||||
break
|
||||
except FederationPullAttemptBackoffError as e:
|
||||
# We are in the backoff period for one of the event's
|
||||
# prev_events. Wait it out and try again after.
|
||||
logger.warning(
|
||||
"%s; waiting for %d ms...", e, e.retry_after_ms
|
||||
)
|
||||
await self.clock.sleep(e.retry_after_ms / 1000)
|
||||
|
||||
# Success, no need to try the rest of the destinations.
|
||||
await self._federation_event_handler.update_state_for_partial_state_event(
|
||||
destination, event
|
||||
)
|
||||
break
|
||||
except FederationPullAttemptBackoffError as exc:
|
||||
# Log a warning about why we failed to process the event (the error message
|
||||
# for `FederationPullAttemptBackoffError` is pretty good)
|
||||
logger.warning("_sync_partial_state_room: %s", exc)
|
||||
# We do not record a failed pull attempt when we backoff fetching a missing
|
||||
# `prev_event` because not being able to fetch the `prev_events` just means
|
||||
# we won't be able to de-outlier the pulled event. But we can still use an
|
||||
# `outlier` in the state/auth chain for another event. So we shouldn't stop
|
||||
# a downstream event from trying to pull it.
|
||||
#
|
||||
# This avoids a cascade of backoff for all events in the DAG downstream from
|
||||
# one event backoff upstream.
|
||||
except FederationError as e:
|
||||
# TODO: We should `record_event_failed_pull_attempt` here,
|
||||
# see https://github.com/matrix-org/synapse/issues/13700
|
||||
|
||||
if attempt == len(destinations) - 1:
|
||||
# We have tried every remote server for this event. Give up.
|
||||
# TODO(faster_joins) giving up isn't the right thing to do
|
||||
@@ -1984,8 +1967,6 @@ class FederationHandler:
|
||||
destination,
|
||||
e,
|
||||
)
|
||||
# TODO: We should `record_event_failed_pull_attempt` here,
|
||||
# see https://github.com/matrix-org/synapse/issues/13700
|
||||
raise
|
||||
|
||||
# Try the next remote server.
|
||||
|
||||
@@ -140,7 +140,6 @@ class FederationEventHandler:
|
||||
"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self._clock = hs.get_clock()
|
||||
self._store = hs.get_datastores().main
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._state_storage_controller = self._storage_controllers.state
|
||||
@@ -584,7 +583,7 @@ class FederationEventHandler:
|
||||
|
||||
await self._check_event_auth(origin, event, context)
|
||||
if context.rejected:
|
||||
raise SynapseError(403, "Join event was rejected")
|
||||
raise SynapseError(400, "Join event was rejected")
|
||||
|
||||
# the remote server is responsible for sending our join event to the rest
|
||||
# of the federation. Indeed, attempting to do so will result in problems
|
||||
@@ -1039,8 +1038,8 @@ class FederationEventHandler:
|
||||
|
||||
Raises:
|
||||
FederationPullAttemptBackoffError if we are are deliberately not attempting
|
||||
to pull one of the given event's `prev_event`s over federation because
|
||||
we've already done so recently and are backing off.
|
||||
to pull the given event over federation because we've already done so
|
||||
recently and are backing off.
|
||||
FederationError if we fail to get the state from the remote server after any
|
||||
missing `prev_event`s.
|
||||
"""
|
||||
@@ -1054,22 +1053,13 @@ class FederationEventHandler:
|
||||
# If we've already recently attempted to pull this missing event, don't
|
||||
# try it again so soon. Since we have to fetch all of the prev_events, we can
|
||||
# bail early here if we find any to ignore.
|
||||
prevs_with_pull_backoff = (
|
||||
await self._store.get_event_ids_to_not_pull_from_backoff(
|
||||
room_id, missing_prevs
|
||||
)
|
||||
prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
|
||||
room_id, missing_prevs
|
||||
)
|
||||
if len(prevs_with_pull_backoff) > 0:
|
||||
if len(prevs_to_ignore) > 0:
|
||||
raise FederationPullAttemptBackoffError(
|
||||
event_ids=prevs_with_pull_backoff.keys(),
|
||||
message=(
|
||||
f"While computing context for event={event_id}, not attempting to "
|
||||
f"pull missing prev_events={list(prevs_with_pull_backoff.keys())} "
|
||||
"because we already tried to pull recently (backing off)."
|
||||
),
|
||||
retry_after_ms=(
|
||||
max(prevs_with_pull_backoff.values()) - self._clock.time_msec()
|
||||
),
|
||||
event_ids=prevs_to_ignore,
|
||||
message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
|
||||
)
|
||||
|
||||
if not missing_prevs:
|
||||
|
||||
@@ -987,11 +987,10 @@ class EventCreationHandler:
|
||||
# a situation where event persistence can't keep up, causing
|
||||
# extremities to pile up, which in turn leads to state resolution
|
||||
# taking longer.
|
||||
room_id = event_dict["room_id"]
|
||||
async with self.limiter.queue(room_id):
|
||||
async with self.limiter.queue(event_dict["room_id"]):
|
||||
if txn_id:
|
||||
event = await self.get_event_from_transaction(
|
||||
requester, txn_id, room_id
|
||||
requester, txn_id, event_dict["room_id"]
|
||||
)
|
||||
if event:
|
||||
# we know it was persisted, so must have a stream ordering
|
||||
@@ -1001,18 +1000,6 @@ class EventCreationHandler:
|
||||
event.internal_metadata.stream_ordering,
|
||||
)
|
||||
|
||||
# If we don't have any prev event IDs specified then we need to
|
||||
# check that the host is in the room (as otherwise populating the
|
||||
# prev events will fail), at which point we may as well check the
|
||||
# local user is in the room.
|
||||
if not prev_event_ids:
|
||||
user_id = requester.user.to_string()
|
||||
is_user_in_room = await self.store.check_local_user_in_room(
|
||||
user_id, room_id
|
||||
)
|
||||
if not is_user_in_room:
|
||||
raise AuthError(403, f"User {user_id} not in room {room_id}")
|
||||
|
||||
# Try several times, it could fail with PartialStateConflictError
|
||||
# in handle_new_client_event, cf comment in except block.
|
||||
max_retries = 5
|
||||
|
||||
@@ -1239,7 +1239,6 @@ class OidcProvider:
|
||||
grandfather_existing_users,
|
||||
extra_attributes,
|
||||
auth_provider_session_id=sid,
|
||||
registration_enabled=self._config.enable_registration,
|
||||
)
|
||||
|
||||
def _remote_id_from_userinfo(self, userinfo: UserInfo) -> str:
|
||||
|
||||
@@ -683,7 +683,7 @@ class PaginationHandler:
|
||||
|
||||
await self._storage_controllers.purge_events.purge_room(room_id)
|
||||
|
||||
logger.info("purge complete for room_id %s", room_id)
|
||||
logger.info("complete")
|
||||
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
|
||||
except Exception:
|
||||
f = Failure()
|
||||
|
||||
@@ -63,7 +63,7 @@ class ProfileHandler:
|
||||
|
||||
self._third_party_rules = hs.get_third_party_event_rules()
|
||||
|
||||
async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
|
||||
async def get_profile(self, user_id: str) -> JsonDict:
|
||||
target_user = UserID.from_string(user_id)
|
||||
|
||||
if self.hs.is_mine(target_user):
|
||||
@@ -81,7 +81,7 @@ class ProfileHandler:
|
||||
destination=target_user.domain,
|
||||
query_type="profile",
|
||||
args={"user_id": user_id},
|
||||
ignore_backoff=ignore_backoff,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
return result
|
||||
except RequestSendFailed as e:
|
||||
|
||||
@@ -596,20 +596,14 @@ class RegistrationHandler:
|
||||
Args:
|
||||
user_id: The user to join
|
||||
"""
|
||||
# If there are no rooms to auto-join, just bail.
|
||||
if not self.hs.config.registration.auto_join_rooms:
|
||||
return
|
||||
|
||||
# auto-join the user to any rooms we're supposed to dump them into
|
||||
|
||||
# try to create the room if we're the first real user on the server. Note
|
||||
# that an auto-generated support or bot user is not a real user and will never be
|
||||
# the user to create the room
|
||||
should_auto_create_rooms = False
|
||||
if (
|
||||
self.hs.config.registration.autocreate_auto_join_rooms
|
||||
and await self.store.is_real_user(user_id)
|
||||
):
|
||||
is_real_user = await self.store.is_real_user(user_id)
|
||||
if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user:
|
||||
count = await self.store.count_real_users()
|
||||
should_auto_create_rooms = count == 1
|
||||
|
||||
@@ -1013,11 +1007,11 @@ class RegistrationHandler:
|
||||
user_tuple = await self.store.get_user_by_access_token(token)
|
||||
# The token better still exist.
|
||||
assert user_tuple
|
||||
device_id = user_tuple.device_id
|
||||
token_id = user_tuple.token_id
|
||||
|
||||
await self.pusher_pool.add_or_update_pusher(
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
access_token=token_id,
|
||||
kind="email",
|
||||
app_id="m.email",
|
||||
app_display_name="Email Notifications",
|
||||
|
||||
@@ -569,7 +569,7 @@ class RoomCreationHandler:
|
||||
new_room_id,
|
||||
# we expect to override all the presets with initial_state, so this is
|
||||
# somewhat arbitrary.
|
||||
room_config={"preset": RoomCreationPreset.PRIVATE_CHAT},
|
||||
preset_config=RoomCreationPreset.PRIVATE_CHAT,
|
||||
invite_list=[],
|
||||
initial_state=initial_state,
|
||||
creation_content=creation_content,
|
||||
@@ -904,6 +904,13 @@ class RoomCreationHandler:
|
||||
check_membership=False,
|
||||
)
|
||||
|
||||
preset_config = config.get(
|
||||
"preset",
|
||||
RoomCreationPreset.PRIVATE_CHAT
|
||||
if visibility == "private"
|
||||
else RoomCreationPreset.PUBLIC_CHAT,
|
||||
)
|
||||
|
||||
raw_initial_state = config.get("initial_state", [])
|
||||
|
||||
initial_state = OrderedDict()
|
||||
@@ -922,7 +929,7 @@ class RoomCreationHandler:
|
||||
) = await self._send_events_for_new_room(
|
||||
requester,
|
||||
room_id,
|
||||
room_config=config,
|
||||
preset_config=preset_config,
|
||||
invite_list=invite_list,
|
||||
initial_state=initial_state,
|
||||
creation_content=creation_content,
|
||||
@@ -931,6 +938,48 @@ class RoomCreationHandler:
|
||||
creator_join_profile=creator_join_profile,
|
||||
)
|
||||
|
||||
if "name" in config:
|
||||
name = config["name"]
|
||||
(
|
||||
name_event,
|
||||
last_stream_id,
|
||||
) = await self.event_creation_handler.create_and_send_nonmember_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.Name,
|
||||
"room_id": room_id,
|
||||
"sender": user_id,
|
||||
"state_key": "",
|
||||
"content": {"name": name},
|
||||
},
|
||||
ratelimit=False,
|
||||
prev_event_ids=[last_sent_event_id],
|
||||
depth=depth,
|
||||
)
|
||||
last_sent_event_id = name_event.event_id
|
||||
depth += 1
|
||||
|
||||
if "topic" in config:
|
||||
topic = config["topic"]
|
||||
(
|
||||
topic_event,
|
||||
last_stream_id,
|
||||
) = await self.event_creation_handler.create_and_send_nonmember_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.Topic,
|
||||
"room_id": room_id,
|
||||
"sender": user_id,
|
||||
"state_key": "",
|
||||
"content": {"topic": topic},
|
||||
},
|
||||
ratelimit=False,
|
||||
prev_event_ids=[last_sent_event_id],
|
||||
depth=depth,
|
||||
)
|
||||
last_sent_event_id = topic_event.event_id
|
||||
depth += 1
|
||||
|
||||
# we avoid dropping the lock between invites, as otherwise joins can
|
||||
# start coming in and making the createRoom slow.
|
||||
#
|
||||
@@ -998,7 +1047,7 @@ class RoomCreationHandler:
|
||||
self,
|
||||
creator: Requester,
|
||||
room_id: str,
|
||||
room_config: JsonDict,
|
||||
preset_config: str,
|
||||
invite_list: List[str],
|
||||
initial_state: MutableStateMap,
|
||||
creation_content: JsonDict,
|
||||
@@ -1015,33 +1064,11 @@ class RoomCreationHandler:
|
||||
|
||||
Rate limiting should already have been applied by this point.
|
||||
|
||||
Args:
|
||||
creator:
|
||||
the user requesting the room creation
|
||||
room_id:
|
||||
room id for the room being created
|
||||
room_config:
|
||||
A dict of configuration options. This will be the body of
|
||||
a /createRoom request; see
|
||||
https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
|
||||
invite_list:
|
||||
a list of user ids to invite to the room
|
||||
initial_state:
|
||||
A list of state events to set in the new room.
|
||||
creation_content:
|
||||
Extra keys, such as m.federate, to be added to the content of the m.room.create event.
|
||||
room_alias:
|
||||
alias for the room
|
||||
power_level_content_override:
|
||||
The power level content to override in the default power level event.
|
||||
creator_join_profile:
|
||||
Set to override the displayname and avatar for the creating
|
||||
user in this room.
|
||||
|
||||
Returns:
|
||||
A tuple containing the stream ID, event ID and depth of the last
|
||||
event sent to the room.
|
||||
"""
|
||||
|
||||
creator_id = creator.user.to_string()
|
||||
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
|
||||
depth = 1
|
||||
@@ -1052,6 +1079,9 @@ class RoomCreationHandler:
|
||||
# created (but not persisted to the db) to determine state for future created events
|
||||
# (as this info can't be pulled from the db)
|
||||
state_map: MutableStateMap[str] = {}
|
||||
# current_state_group of last event created. Used for computing event context of
|
||||
# events to be batched
|
||||
current_state_group: Optional[int] = None
|
||||
|
||||
def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
|
||||
e = {"type": etype, "content": content}
|
||||
@@ -1105,14 +1135,6 @@ class RoomCreationHandler:
|
||||
|
||||
return new_event, new_unpersisted_context
|
||||
|
||||
visibility = room_config.get("visibility", "private")
|
||||
preset_config = room_config.get(
|
||||
"preset",
|
||||
RoomCreationPreset.PRIVATE_CHAT
|
||||
if visibility == "private"
|
||||
else RoomCreationPreset.PUBLIC_CHAT,
|
||||
)
|
||||
|
||||
try:
|
||||
config = self._presets_dict[preset_config]
|
||||
except KeyError:
|
||||
@@ -1264,24 +1286,6 @@ class RoomCreationHandler:
|
||||
)
|
||||
events_to_send.append((encryption_event, encryption_context))
|
||||
|
||||
if "name" in room_config:
|
||||
name = room_config["name"]
|
||||
name_event, name_context = await create_event(
|
||||
EventTypes.Name,
|
||||
{"name": name},
|
||||
True,
|
||||
)
|
||||
events_to_send.append((name_event, name_context))
|
||||
|
||||
if "topic" in room_config:
|
||||
topic = room_config["topic"]
|
||||
topic_event, topic_context = await create_event(
|
||||
EventTypes.Topic,
|
||||
{"topic": topic},
|
||||
True,
|
||||
)
|
||||
events_to_send.append((topic_event, topic_context))
|
||||
|
||||
datastore = self.hs.get_datastores().state
|
||||
events_and_context = (
|
||||
await UnpersistedEventContext.batch_persist_unpersisted_contexts(
|
||||
|
||||
@@ -850,68 +850,63 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
# `is_partial_state_room` also indicates whether `partial_state_before_join` is
|
||||
# partial.
|
||||
|
||||
is_host_in_room = await self._is_host_in_room(partial_state_before_join)
|
||||
|
||||
# if we are not in the room, we won't have the current state
|
||||
if is_host_in_room:
|
||||
# TODO: Refactor into dictionary of explicitly allowed transitions
|
||||
# between old and new state, with specific error messages for some
|
||||
# transitions and generic otherwise
|
||||
old_state_id = partial_state_before_join.get(
|
||||
(EventTypes.Member, target.to_string())
|
||||
)
|
||||
|
||||
if old_state_id:
|
||||
old_state = await self.store.get_event(old_state_id, allow_none=True)
|
||||
old_membership = (
|
||||
old_state.content.get("membership") if old_state else None
|
||||
# TODO: Refactor into dictionary of explicitly allowed transitions
|
||||
# between old and new state, with specific error messages for some
|
||||
# transitions and generic otherwise
|
||||
old_state_id = partial_state_before_join.get(
|
||||
(EventTypes.Member, target.to_string())
|
||||
)
|
||||
if old_state_id:
|
||||
old_state = await self.store.get_event(old_state_id, allow_none=True)
|
||||
old_membership = old_state.content.get("membership") if old_state else None
|
||||
if action == "unban" and old_membership != "ban":
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Cannot unban user who was not banned"
|
||||
" (membership=%s)" % old_membership,
|
||||
errcode=Codes.BAD_STATE,
|
||||
)
|
||||
if action == "unban" and old_membership != "ban":
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Cannot unban user who was not banned"
|
||||
" (membership=%s)" % old_membership,
|
||||
errcode=Codes.BAD_STATE,
|
||||
)
|
||||
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Cannot %s user who was banned" % (action,),
|
||||
errcode=Codes.BAD_STATE,
|
||||
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Cannot %s user who was banned" % (action,),
|
||||
errcode=Codes.BAD_STATE,
|
||||
)
|
||||
|
||||
if old_state:
|
||||
same_content = content == old_state.content
|
||||
same_membership = old_membership == effective_membership_state
|
||||
same_sender = requester.user.to_string() == old_state.sender
|
||||
if same_sender and same_membership and same_content:
|
||||
# duplicate event.
|
||||
# we know it was persisted, so must have a stream ordering.
|
||||
assert old_state.internal_metadata.stream_ordering
|
||||
return (
|
||||
old_state.event_id,
|
||||
old_state.internal_metadata.stream_ordering,
|
||||
)
|
||||
|
||||
if old_state:
|
||||
same_content = content == old_state.content
|
||||
same_membership = old_membership == effective_membership_state
|
||||
same_sender = requester.user.to_string() == old_state.sender
|
||||
if same_sender and same_membership and same_content:
|
||||
# duplicate event.
|
||||
# we know it was persisted, so must have a stream ordering.
|
||||
assert old_state.internal_metadata.stream_ordering
|
||||
return (
|
||||
old_state.event_id,
|
||||
old_state.internal_metadata.stream_ordering,
|
||||
)
|
||||
if old_membership in ["ban", "leave"] and action == "kick":
|
||||
raise AuthError(403, "The target user is not in the room")
|
||||
|
||||
if old_membership in ["ban", "leave"] and action == "kick":
|
||||
raise AuthError(403, "The target user is not in the room")
|
||||
# we don't allow people to reject invites to the server notice
|
||||
# room, but they can leave it once they are joined.
|
||||
if (
|
||||
old_membership == Membership.INVITE
|
||||
and effective_membership_state == Membership.LEAVE
|
||||
):
|
||||
is_blocked = await self.store.is_server_notice_room(room_id)
|
||||
if is_blocked:
|
||||
raise SynapseError(
|
||||
HTTPStatus.FORBIDDEN,
|
||||
"You cannot reject this invite",
|
||||
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
|
||||
)
|
||||
else:
|
||||
if action == "kick":
|
||||
raise AuthError(403, "The target user is not in the room")
|
||||
|
||||
# we don't allow people to reject invites to the server notice
|
||||
# room, but they can leave it once they are joined.
|
||||
if (
|
||||
old_membership == Membership.INVITE
|
||||
and effective_membership_state == Membership.LEAVE
|
||||
):
|
||||
is_blocked = await self.store.is_server_notice_room(room_id)
|
||||
if is_blocked:
|
||||
raise SynapseError(
|
||||
HTTPStatus.FORBIDDEN,
|
||||
"You cannot reject this invite",
|
||||
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
|
||||
)
|
||||
else:
|
||||
if action == "kick":
|
||||
raise AuthError(403, "The target user is not in the room")
|
||||
is_host_in_room = await self._is_host_in_room(partial_state_before_join)
|
||||
|
||||
if effective_membership_state == Membership.JOIN:
|
||||
if requester.is_guest:
|
||||
|
||||
@@ -383,7 +383,6 @@ class SsoHandler:
|
||||
grandfather_existing_users: Callable[[], Awaitable[Optional[str]]],
|
||||
extra_login_attributes: Optional[JsonDict] = None,
|
||||
auth_provider_session_id: Optional[str] = None,
|
||||
registration_enabled: bool = True,
|
||||
) -> None:
|
||||
"""
|
||||
Given an SSO ID, retrieve the user ID for it and possibly register the user.
|
||||
@@ -436,10 +435,6 @@ class SsoHandler:
|
||||
|
||||
auth_provider_session_id: An optional session ID from the IdP.
|
||||
|
||||
registration_enabled: An optional boolean to enable/disable automatic
|
||||
registrations of new users. If false and the user does not exist then the
|
||||
flow is aborted. Defaults to true.
|
||||
|
||||
Raises:
|
||||
MappingException if there was a problem mapping the response to a user.
|
||||
RedirectException: if the mapping provider needs to redirect the user
|
||||
@@ -467,16 +462,8 @@ class SsoHandler:
|
||||
auth_provider_id, remote_user_id, user_id
|
||||
)
|
||||
|
||||
if not user_id and not registration_enabled:
|
||||
logger.info(
|
||||
"User does not exist and registration are disabled for IdP '%s' and remote_user_id '%s'",
|
||||
auth_provider_id,
|
||||
remote_user_id,
|
||||
)
|
||||
raise MappingException(
|
||||
"User does not exist and registrations are disabled"
|
||||
)
|
||||
elif not user_id: # Otherwise, generate a new user.
|
||||
# Otherwise, generate a new user.
|
||||
if not user_id:
|
||||
attributes = await self._call_attribute_mapper(sso_to_matrix_id_mapper)
|
||||
|
||||
next_step_url = self._get_url_for_next_new_user_step(
|
||||
|
||||
@@ -943,8 +943,6 @@ class SyncHandler:
|
||||
|
||||
timeline_state = {}
|
||||
|
||||
# Membership events to fetch that can be found in the room state, or in
|
||||
# the case of partial state rooms, the auth events of timeline events.
|
||||
members_to_fetch = set()
|
||||
first_event_by_sender_map = {}
|
||||
for event in batch.events:
|
||||
@@ -966,19 +964,9 @@ class SyncHandler:
|
||||
# (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
|
||||
# We only need apply this on full state syncs given we disabled
|
||||
# LL for incr syncs in #3840.
|
||||
# We don't insert ourselves into `members_to_fetch`, because in some
|
||||
# rare cases (an empty event batch with a now_token after the user's
|
||||
# leave in a partial state room which another local user has
|
||||
# joined), the room state will be missing our membership and there
|
||||
# is no guarantee that our membership will be in the auth events of
|
||||
# timeline events when the room is partial stated.
|
||||
state_filter = StateFilter.from_lazy_load_member_list(
|
||||
members_to_fetch.union((sync_config.user.to_string(),))
|
||||
)
|
||||
else:
|
||||
state_filter = StateFilter.from_lazy_load_member_list(
|
||||
members_to_fetch
|
||||
)
|
||||
members_to_fetch.add(sync_config.user.to_string())
|
||||
|
||||
state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
|
||||
|
||||
# We are happy to use partial state to compute the `/sync` response.
|
||||
# Since partial state may not include the lazy-loaded memberships we
|
||||
@@ -1000,9 +988,7 @@ class SyncHandler:
|
||||
# sync's timeline and the start of the current sync's timeline.
|
||||
# See the docstring above for details.
|
||||
state_ids: StateMap[str]
|
||||
# We need to know whether the state we fetch may be partial, so check
|
||||
# whether the room is partial stated *before* fetching it.
|
||||
is_partial_state_room = await self.store.is_partial_state_room(room_id)
|
||||
|
||||
if full_state:
|
||||
if batch:
|
||||
state_at_timeline_end = (
|
||||
@@ -1133,7 +1119,7 @@ class SyncHandler:
|
||||
# If we only have partial state for the room, `state_ids` may be missing the
|
||||
# memberships we wanted. We attempt to find some by digging through the auth
|
||||
# events of timeline events.
|
||||
if lazy_load_members and is_partial_state_room:
|
||||
if lazy_load_members and await self.store.is_partial_state_room(room_id):
|
||||
assert members_to_fetch is not None
|
||||
assert first_event_by_sender_map is not None
|
||||
|
||||
@@ -1240,10 +1226,6 @@ class SyncHandler:
|
||||
continue
|
||||
|
||||
event_with_membership_auth = events_with_membership_auth[member]
|
||||
is_create = (
|
||||
event_with_membership_auth.is_state()
|
||||
and event_with_membership_auth.type == EventTypes.Create
|
||||
)
|
||||
is_join = (
|
||||
event_with_membership_auth.is_state()
|
||||
and event_with_membership_auth.type == EventTypes.Member
|
||||
@@ -1251,10 +1233,9 @@ class SyncHandler:
|
||||
and event_with_membership_auth.content.get("membership")
|
||||
== Membership.JOIN
|
||||
)
|
||||
if not is_create and not is_join:
|
||||
if not is_join:
|
||||
# The event must include the desired membership as an auth event, unless
|
||||
# it's the `m.room.create` event for a room or the first join event for
|
||||
# a given user.
|
||||
# it's the first join event for a given user.
|
||||
missing_members.add(member)
|
||||
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
|
||||
|
||||
|
||||
@@ -52,11 +52,6 @@ FEDERATION_TIMEOUT = 60 * 1000
FEDERATION_PING_INTERVAL = 40 * 1000


# How long to remember a typing notification happened in a room before
# forgetting about it.
FORGET_TIMEOUT = 10 * 60 * 1000


class FollowerTypingHandler:
    """A typing handler on a different process than the writer that is updated
    via replication.
|
||||
@@ -88,10 +83,7 @@ class FollowerTypingHandler:
|
||||
self.wheel_timer: WheelTimer[RoomMember] = WheelTimer(bucket_size=5000)
|
||||
self._latest_room_serial = 0
|
||||
|
||||
self._rooms_updated: Set[str] = set()
|
||||
|
||||
self.clock.looping_call(self._handle_timeouts, 5000)
|
||||
self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT)
|
||||
|
||||
def _reset(self) -> None:
|
||||
"""Reset the typing handler's data caches."""
|
||||
@@ -100,8 +92,6 @@ class FollowerTypingHandler:
|
||||
# map room IDs to sets of users currently typing
|
||||
self._room_typing = {}
|
||||
|
||||
self._rooms_updated = set()
|
||||
|
||||
self._member_last_federation_poke = {}
|
||||
self.wheel_timer = WheelTimer(bucket_size=5000)
|
||||
|
||||
@@ -188,7 +178,6 @@ class FollowerTypingHandler:
|
||||
prev_typing = self._room_typing.get(row.room_id, set())
|
||||
now_typing = set(row.user_ids)
|
||||
self._room_typing[row.room_id] = now_typing
|
||||
self._rooms_updated.add(row.room_id)
|
||||
|
||||
if self.federation:
|
||||
run_as_background_process(
|
||||
@@ -220,19 +209,6 @@ class FollowerTypingHandler:
|
||||
def get_current_token(self) -> int:
|
||||
return self._latest_room_serial
|
||||
|
||||
def _prune_old_typing(self) -> None:
|
||||
"""Prune rooms that haven't seen typing updates since last time.
|
||||
|
||||
This is safe to do as clients should time out old typing notifications.
|
||||
"""
|
||||
stale_rooms = self._room_serials.keys() - self._rooms_updated
|
||||
|
||||
for room_id in stale_rooms:
|
||||
self._room_serials.pop(room_id, None)
|
||||
self._room_typing.pop(room_id, None)
|
||||
|
||||
self._rooms_updated = set()
|
||||
|
||||
|
||||
class TypingWriterHandler(FollowerTypingHandler):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
@@ -412,7 +388,6 @@ class TypingWriterHandler(FollowerTypingHandler):
|
||||
self._typing_stream_change_cache.entity_has_changed(
|
||||
member.room_id, self._latest_room_serial
|
||||
)
|
||||
self._rooms_updated.add(member.room_id)
|
||||
|
||||
self.notifier.on_new_event(
|
||||
StreamKeyType.TYPING, self._latest_room_serial, rooms=[member.room_id]
|
||||
|
||||
@@ -13,52 +13,21 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from http import HTTPStatus
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
from twisted.internet.interfaces import IDelayedCall
|
||||
|
||||
import synapse.metrics
|
||||
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.storage.databases.main.user_directory import SearchResult
|
||||
from synapse.storage.roommember import ProfileInfo
|
||||
from synapse.types import UserID
|
||||
from synapse.util.metrics import Measure
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
from synapse.util.stringutils import non_null_str_or_none
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Don't refresh a stale user directory entry, using a Federation /profile request,
|
||||
# for 60 seconds. This gives time for other state events to arrive (which will
|
||||
# then be coalesced such that only one /profile request is made).
|
||||
USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000
|
||||
|
||||
# Maximum number of remote servers that we will attempt to refresh profiles for
|
||||
# in one go.
|
||||
MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5
|
||||
|
||||
# As long as we have servers to refresh (without backoff), keep adding more
|
||||
# every 15 seconds.
|
||||
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15
|
||||
|
||||
|
||||
def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
|
||||
"""
|
||||
Calculates the time of a next retry given `now_ts` in ms and the number
|
||||
of failures encountered thus far.
|
||||
|
||||
Currently the sequence goes:
|
||||
1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week
|
||||
"""
|
||||
return now_ts + 60_000 * (5 ** min(retry_count, 7))
|
||||
|
||||
|
||||
class UserDirectoryHandler(StateDeltasHandler):
|
||||
"""Handles queries and updates for the user_directory.
|
||||
@@ -95,24 +64,12 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
self.update_user_directory = hs.config.worker.should_update_user_directory
|
||||
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
|
||||
self.spam_checker = hs.get_spam_checker()
|
||||
self._hs = hs
|
||||
|
||||
# The current position in the current_state_delta stream
|
||||
self.pos: Optional[int] = None
|
||||
|
||||
# Guard to ensure we only process deltas one at a time
|
||||
self._is_processing = False
|
||||
|
||||
# Guard to ensure we only have one process for refreshing remote profiles
|
||||
self._is_refreshing_remote_profiles = False
|
||||
# Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process`
|
||||
self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None
|
||||
|
||||
# Guard to ensure we only have one process for refreshing remote profiles
|
||||
# for the given servers.
|
||||
# Set of server names.
|
||||
self._is_refreshing_remote_profiles_for_servers: Set[str] = set()
|
||||
|
||||
if self.update_user_directory:
|
||||
self.notifier.add_replication_callback(self.notify_new_event)
|
||||
|
||||
@@ -120,11 +77,6 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
# we start populating the user directory
|
||||
self.clock.call_later(0, self.notify_new_event)
|
||||
|
||||
# Kick off the profile refresh process on startup
|
||||
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||
10, self.kick_off_remote_profile_refresh_process
|
||||
)
|
||||
|
||||
async def search_users(
|
||||
self, user_id: str, search_term: str, limit: int
|
||||
) -> SearchResult:
|
||||
@@ -248,8 +200,8 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
typ = delta["type"]
|
||||
state_key = delta["state_key"]
|
||||
room_id = delta["room_id"]
|
||||
event_id: Optional[str] = delta["event_id"]
|
||||
prev_event_id: Optional[str] = delta["prev_event_id"]
|
||||
event_id = delta["event_id"]
|
||||
prev_event_id = delta["prev_event_id"]
|
||||
|
||||
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
|
||||
|
||||
@@ -345,8 +297,8 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
async def _handle_room_membership_event(
|
||||
self,
|
||||
room_id: str,
|
||||
prev_event_id: Optional[str],
|
||||
event_id: Optional[str],
|
||||
prev_event_id: str,
|
||||
event_id: str,
|
||||
state_key: str,
|
||||
) -> None:
|
||||
"""Process a single room membershp event.
|
||||
@@ -396,8 +348,7 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
# Handle any profile changes for remote users.
|
||||
# (For local users the rest of the application calls
|
||||
# `handle_local_profile_change`.)
|
||||
# Only process if there is an event_id.
|
||||
if is_remote and event_id is not None:
|
||||
if is_remote:
|
||||
await self._handle_possible_remote_profile_change(
|
||||
state_key, room_id, prev_event_id, event_id
|
||||
)
|
||||
@@ -405,13 +356,29 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
# This may be the first time we've seen a remote user. If
|
||||
# so, ensure we have a directory entry for them. (For local users,
|
||||
# the rest of the application calls `handle_local_profile_change`.)
|
||||
# Only process if there is an event_id.
|
||||
if is_remote and event_id is not None:
|
||||
await self._handle_possible_remote_profile_change(
|
||||
state_key, room_id, None, event_id
|
||||
)
|
||||
if is_remote:
|
||||
await self._upsert_directory_entry_for_remote_user(state_key, event_id)
|
||||
await self._track_user_joined_room(room_id, state_key)
|
||||
|
||||
async def _upsert_directory_entry_for_remote_user(
|
||||
self, user_id: str, event_id: str
|
||||
) -> None:
|
||||
"""A remote user has just joined a room. Ensure they have an entry in
|
||||
the user directory. The caller is responsible for making sure they're
|
||||
remote.
|
||||
"""
|
||||
event = await self.store.get_event(event_id, allow_none=True)
|
||||
# It isn't expected for this event to not exist, but we
|
||||
# don't want the entire background process to break.
|
||||
if event is None:
|
||||
return
|
||||
|
||||
logger.debug("Adding new user to dir, %r", user_id)
|
||||
|
||||
await self.store.update_profile_in_user_dir(
|
||||
user_id, event.content.get("displayname"), event.content.get("avatar_url")
|
||||
)
|
||||
|
||||
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
|
||||
"""Someone's just joined a room. Update `users_in_public_rooms` or
|
||||
`users_who_share_private_rooms` as appropriate.
|
||||
@@ -493,17 +460,14 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
user_id: str,
|
||||
room_id: str,
|
||||
prev_event_id: Optional[str],
|
||||
event_id: str,
|
||||
event_id: Optional[str],
|
||||
) -> None:
|
||||
"""Check member event changes for any profile changes and update the
|
||||
database if there are. This is intended for remote users only. The caller
|
||||
is responsible for checking that the given user is remote.
|
||||
"""
|
||||
|
||||
if not prev_event_id:
|
||||
# If we don't have an older event to fall back on, just fetch the same
|
||||
# event itself.
|
||||
prev_event_id = event_id
|
||||
if not prev_event_id or not event_id:
|
||||
return
|
||||
|
||||
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
|
||||
event = await self.store.get_event(event_id, allow_none=True)
|
||||
@@ -514,236 +478,17 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
if event.membership != Membership.JOIN:
|
||||
return
|
||||
|
||||
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
|
||||
room_id
|
||||
)
|
||||
if not is_public:
|
||||
# Don't collect user profiles from private rooms as they are not guaranteed
|
||||
# to be the same as the user's global profile.
|
||||
now_ts = self.clock.time_msec()
|
||||
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||
user_id,
|
||||
next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS,
|
||||
retry_counter=0,
|
||||
)
|
||||
# Schedule a wake-up to refresh the user directory for this server.
|
||||
# We intentionally wake up this server directly because we don't want
|
||||
# other servers ahead of it in the queue to get in the way of updating
|
||||
# the profile if the server only just sent us an event.
|
||||
self.clock.call_later(
|
||||
USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
|
||||
self.kick_off_remote_profile_refresh_process_for_remote_server,
|
||||
UserID.from_string(user_id).domain,
|
||||
)
|
||||
# Schedule a wake-up to handle any backoffs that may occur in the future.
|
||||
self.clock.call_later(
|
||||
2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
|
||||
self.kick_off_remote_profile_refresh_process,
|
||||
)
|
||||
return
|
||||
|
||||
prev_name = prev_event.content.get("displayname")
|
||||
new_name = event.content.get("displayname")
|
||||
# If the new name is an unexpected form, replace with None.
|
||||
# If the new name is an unexpected form, do not update the directory.
|
||||
if not isinstance(new_name, str):
|
||||
new_name = None
|
||||
new_name = prev_name
|
||||
|
||||
prev_avatar = prev_event.content.get("avatar_url")
|
||||
new_avatar = event.content.get("avatar_url")
|
||||
# If the new avatar is an unexpected form, replace with None.
|
||||
# If the new avatar is an unexpected form, do not update the directory.
|
||||
if not isinstance(new_avatar, str):
|
||||
new_avatar = None
|
||||
new_avatar = prev_avatar
|
||||
|
||||
if (
|
||||
prev_name != new_name
|
||||
or prev_avatar != new_avatar
|
||||
or prev_event_id == event_id
|
||||
):
|
||||
# Only update if something has changed, or we didn't have a previous event
|
||||
# in the first place.
|
||||
if prev_name != new_name or prev_avatar != new_avatar:
|
||||
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
|
||||
|
||||
def kick_off_remote_profile_refresh_process(self) -> None:
|
||||
"""Called when there may be remote users with stale profiles to be refreshed"""
|
||||
if not self.update_user_directory:
|
||||
return
|
||||
|
||||
if self._is_refreshing_remote_profiles:
|
||||
return
|
||||
|
||||
if self._refresh_remote_profiles_call_later:
|
||||
if self._refresh_remote_profiles_call_later.active():
|
||||
self._refresh_remote_profiles_call_later.cancel()
|
||||
self._refresh_remote_profiles_call_later = None
|
||||
|
||||
async def process() -> None:
|
||||
try:
|
||||
await self._unsafe_refresh_remote_profiles()
|
||||
finally:
|
||||
self._is_refreshing_remote_profiles = False
|
||||
|
||||
self._is_refreshing_remote_profiles = True
|
||||
run_as_background_process("user_directory.refresh_remote_profiles", process)
|
||||
|
||||
async def _unsafe_refresh_remote_profiles(self) -> None:
|
||||
limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len(
|
||||
self._is_refreshing_remote_profiles_for_servers
|
||||
)
|
||||
if limit <= 0:
|
||||
# nothing to do: already refreshing the maximum number of servers
|
||||
# at once.
|
||||
# Come back later.
|
||||
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
|
||||
self.kick_off_remote_profile_refresh_process,
|
||||
)
|
||||
return
|
||||
|
||||
servers_to_refresh = (
|
||||
await self.store.get_remote_servers_with_profiles_to_refresh(
|
||||
now_ts=self.clock.time_msec(), limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
if not servers_to_refresh:
|
||||
# Do we have any backing-off servers that we should try again
|
||||
# for eventually?
|
||||
# By setting `now` to a point in the far future, we can ask for
|
||||
# which server/user is next to be refreshed, even though it is
|
||||
# not actually refreshable *now*.
|
||||
end_of_time = 1 << 62
|
||||
backing_off_servers = (
|
||||
await self.store.get_remote_servers_with_profiles_to_refresh(
|
||||
now_ts=end_of_time, limit=1
|
||||
)
|
||||
)
|
||||
if backing_off_servers:
|
||||
# Find out when the next user is refreshable and schedule a
|
||||
# refresh then.
|
||||
backing_off_server_name = backing_off_servers[0]
|
||||
users = await self.store.get_remote_users_to_refresh_on_server(
|
||||
backing_off_server_name, now_ts=end_of_time, limit=1
|
||||
)
|
||||
if not users:
|
||||
return
|
||||
_, _, next_try_at_ts = users[0]
|
||||
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||
((next_try_at_ts - self.clock.time_msec()) // 1000) + 2,
|
||||
self.kick_off_remote_profile_refresh_process,
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
for server_to_refresh in servers_to_refresh:
|
||||
self.kick_off_remote_profile_refresh_process_for_remote_server(
|
||||
server_to_refresh
|
||||
)
|
||||
|
||||
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
|
||||
self.kick_off_remote_profile_refresh_process,
|
||||
)
|
||||
|
||||
def kick_off_remote_profile_refresh_process_for_remote_server(
|
||||
self, server_name: str
|
||||
) -> None:
|
||||
"""Called when there may be remote users with stale profiles to be refreshed
|
||||
on the given server."""
|
||||
if not self.update_user_directory:
|
||||
return
|
||||
|
||||
if server_name in self._is_refreshing_remote_profiles_for_servers:
|
||||
return
|
||||
|
||||
async def process() -> None:
|
||||
try:
|
||||
await self._unsafe_refresh_remote_profiles_for_remote_server(
|
||||
server_name
|
||||
)
|
||||
finally:
|
||||
self._is_refreshing_remote_profiles_for_servers.remove(server_name)
|
||||
|
||||
self._is_refreshing_remote_profiles_for_servers.add(server_name)
|
||||
run_as_background_process(
|
||||
"user_directory.refresh_remote_profiles_for_remote_server", process
|
||||
)
|
||||
|
||||
async def _unsafe_refresh_remote_profiles_for_remote_server(
|
||||
self, server_name: str
|
||||
) -> None:
|
||||
logger.info("Refreshing profiles in user directory for %s", server_name)
|
||||
|
||||
while True:
|
||||
# Get a handful of users to process.
|
||||
next_batch = await self.store.get_remote_users_to_refresh_on_server(
|
||||
server_name, now_ts=self.clock.time_msec(), limit=10
|
||||
)
|
||||
if not next_batch:
|
||||
# Finished for now
|
||||
return
|
||||
|
||||
for user_id, retry_counter, _ in next_batch:
|
||||
# Request the profile of the user.
|
||||
try:
|
||||
profile = await self._hs.get_profile_handler().get_profile(
|
||||
user_id, ignore_backoff=False
|
||||
)
|
||||
except NotRetryingDestination as e:
|
||||
logger.info(
|
||||
"Failed to refresh profile for %r because the destination is undergoing backoff",
|
||||
user_id,
|
||||
)
|
||||
# As a special-case, we back off until the destination is no longer
|
||||
# backed off from.
|
||||
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||
user_id,
|
||||
e.retry_last_ts + e.retry_interval,
|
||||
retry_counter=retry_counter + 1,
|
||||
)
|
||||
continue
|
||||
except SynapseError as e:
|
||||
if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND:
|
||||
# The profile doesn't exist.
|
||||
# TODO Does this mean we should clear it from our user
|
||||
# directory?
|
||||
await self.store.clear_remote_user_profile_in_user_dir_stale(
|
||||
user_id
|
||||
)
|
||||
logger.warning(
|
||||
"Refresh of remote profile %r: not found (%r)",
|
||||
user_id,
|
||||
e.msg,
|
||||
)
|
||||
continue
|
||||
|
||||
logger.warning(
|
||||
"Failed to refresh profile for %r because %r", user_id, e
|
||||
)
|
||||
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||
user_id,
|
||||
calculate_time_of_next_retry(
|
||||
self.clock.time_msec(), retry_counter + 1
|
||||
),
|
||||
retry_counter=retry_counter + 1,
|
||||
)
|
||||
continue
|
||||
except Exception:
|
||||
logger.error(
|
||||
"Failed to refresh profile for %r due to unhandled exception",
|
||||
user_id,
|
||||
exc_info=True,
|
||||
)
|
||||
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||
user_id,
|
||||
calculate_time_of_next_retry(
|
||||
self.clock.time_msec(), retry_counter + 1
|
||||
),
|
||||
retry_counter=retry_counter + 1,
|
||||
)
|
||||
continue
|
||||
|
||||
await self.store.update_profile_in_user_dir(
|
||||
user_id,
|
||||
display_name=non_null_str_or_none(profile.get("displayname")),
|
||||
avatar_url=non_null_str_or_none(profile.get("avatar_url")),
|
||||
)
|
||||
|
||||
@@ -268,8 +268,8 @@ class BlacklistingAgentWrapper(Agent):
|
||||
def __init__(
|
||||
self,
|
||||
agent: IAgent,
|
||||
ip_blacklist: IPSet,
|
||||
ip_whitelist: Optional[IPSet] = None,
|
||||
ip_blacklist: Optional[IPSet] = None,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
@@ -291,9 +291,7 @@ class BlacklistingAgentWrapper(Agent):
|
||||
h = urllib.parse.urlparse(uri.decode("ascii"))
|
||||
|
||||
try:
|
||||
# h.hostname is Optional[str], None raises an AddrFormatError, so
|
||||
# this is safe even though IPAddress requires a str.
|
||||
ip_address = IPAddress(h.hostname) # type: ignore[arg-type]
|
||||
ip_address = IPAddress(h.hostname)
|
||||
except AddrFormatError:
|
||||
# Not an IP
|
||||
pass
|
||||
@@ -390,8 +388,8 @@ class SimpleHttpClient:
|
||||
# by the DNS resolution.
|
||||
self.agent = BlacklistingAgentWrapper(
|
||||
self.agent,
|
||||
ip_blacklist=self._ip_blacklist,
|
||||
ip_whitelist=self._ip_whitelist,
|
||||
ip_blacklist=self._ip_blacklist,
|
||||
)
|
||||
|
||||
async def request(
|
||||
@@ -966,42 +964,3 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
|
||||
|
||||
def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory:
|
||||
return self
|
||||
|
||||
|
||||
def is_unknown_endpoint(
e: HttpResponseException, synapse_error: Optional[SynapseError] = None
) -> bool:
"""
Returns true if the response was due to an endpoint being unimplemented.

Args:
e: The error response received from the remote server.
synapse_error: The above error converted to a SynapseError. This is
automatically generated if not provided.

"""
if synapse_error is None:
synapse_error = e.to_synapse_error()

# Matrix v1.6 specifies that servers should return a 404 or 405 with an errcode
# of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
# to an unknown method, respectively.
#
# Older versions of servers don't return proper errors, so be graceful. But,
# also handle that some endpoints truly do return 404 errors.
return (
# 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
(e.code == 404 or e.code == 405)
and (
# Consider empty body or non-JSON bodies to be unrecognised (matches
# older Dendrites & Conduits).
not e.response
or not e.response.startswith(b"{")
# The proper response JSON with M_UNRECOGNIZED errcode.
or synapse_error.errcode == Codes.UNRECOGNIZED
)
) or (
# Older Synapses returned a 400 error.
e.code == 400
and synapse_error.errcode == Codes.UNRECOGNIZED
)
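# An illustrative sketch, not part of this changeset: callers typically use
# is_unknown_endpoint() to decide whether to fall back to an older federation
# API. `fetch_via_v2` and `fetch_via_v1` are hypothetical stand-ins for a
# paired new/legacy request (each a zero-argument coroutine factory).
async def fetch_with_fallback(fetch_via_v2, fetch_via_v1):
    try:
        return await fetch_via_v2()
    except HttpResponseException as e:
        if not is_unknown_endpoint(e):
            raise
        # 404/405 with M_UNRECOGNIZED (or an older Synapse's 400): the remote
        # server does not implement the newer endpoint, so use the legacy one.
        return await fetch_via_v1()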
|
||||
|
||||
@@ -87,7 +87,7 @@ class MatrixFederationAgent:
|
||||
reactor: ISynapseReactor,
|
||||
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
|
||||
user_agent: bytes,
|
||||
ip_whitelist: Optional[IPSet],
|
||||
ip_whitelist: IPSet,
|
||||
ip_blacklist: IPSet,
|
||||
_srv_resolver: Optional[SrvResolver] = None,
|
||||
_well_known_resolver: Optional[WellKnownResolver] = None,
|
||||
|
||||
@@ -892,10 +892,6 @@ def set_cors_headers(request: SynapseRequest) -> None:
|
||||
b"Access-Control-Allow-Headers",
|
||||
b"X-Requested-With, Content-Type, Authorization, Date",
|
||||
)
|
||||
request.setHeader(
|
||||
b"Access-Control-Expose-Headers",
|
||||
b"Synapse-Trace-Id",
|
||||
)
|
||||
|
||||
|
||||
def set_corp_headers(request: Request) -> None:
|
||||
|
||||
@@ -778,13 +778,17 @@ def parse_json_object_from_request(
|
||||
Model = TypeVar("Model", bound=BaseModel)
|
||||
|
||||
|
||||
def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model:
|
||||
"""Validate a deserialized JSON object using the given pydantic model.
|
||||
def parse_and_validate_json_object_from_request(
|
||||
request: Request, model_type: Type[Model]
|
||||
) -> Model:
|
||||
"""Parse a JSON object from the body of a twisted HTTP request, then deserialise and
|
||||
validate using the given pydantic model.
|
||||
|
||||
Raises:
|
||||
SynapseError if the request body couldn't be decoded as JSON or
|
||||
if it wasn't a JSON object.
|
||||
"""
|
||||
content = parse_json_object_from_request(request, allow_empty_body=False)
|
||||
try:
|
||||
instance = model_type.parse_obj(content)
|
||||
except ValidationError as e:
|
||||
@@ -807,20 +811,6 @@ def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model:
|
||||
return instance
|
||||
|
||||
|
||||
def parse_and_validate_json_object_from_request(
|
||||
request: Request, model_type: Type[Model]
|
||||
) -> Model:
|
||||
"""Parse a JSON object from the body of a twisted HTTP request, then deserialise and
|
||||
validate using the given pydantic model.
|
||||
|
||||
Raises:
|
||||
SynapseError if the request body couldn't be decoded as JSON or
|
||||
if it wasn't a JSON object.
|
||||
"""
|
||||
content = parse_json_object_from_request(request, allow_empty_body=False)
|
||||
return validate_json_object(content, model_type)
|
||||
|
||||
|
||||
def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None:
|
||||
absent = []
|
||||
for k in required:
|
||||
|
||||
@@ -19,7 +19,6 @@ from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
|
||||
import attr
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet.address import UNIXAddress
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.interfaces import IAddress, IReactorTime
|
||||
from twisted.python.failure import Failure
|
||||
@@ -258,7 +257,7 @@ class SynapseRequest(Request):
|
||||
request_id,
|
||||
request=ContextRequest(
|
||||
request_id=request_id,
|
||||
ip_address=self.get_client_ip_if_available(),
|
||||
ip_address=self.getClientAddress().host,
|
||||
site_tag=self.synapse_site.site_tag,
|
||||
# The requester is going to be unknown at this point.
|
||||
requester=None,
|
||||
@@ -415,7 +414,7 @@ class SynapseRequest(Request):
|
||||
|
||||
self.synapse_site.access_logger.debug(
|
||||
"%s - %s - Received request: %s %s",
|
||||
self.get_client_ip_if_available(),
|
||||
self.getClientAddress().host,
|
||||
self.synapse_site.site_tag,
|
||||
self.get_method(),
|
||||
self.get_redacted_uri(),
|
||||
@@ -463,7 +462,7 @@ class SynapseRequest(Request):
|
||||
"%s - %s - {%s}"
|
||||
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
|
||||
' %sB %s "%s %s %s" "%s" [%d dbevts]',
|
||||
self.get_client_ip_if_available(),
|
||||
self.getClientAddress().host,
|
||||
self.synapse_site.site_tag,
|
||||
requester,
|
||||
processing_time,
|
||||
@@ -501,26 +500,6 @@ class SynapseRequest(Request):
|
||||
|
||||
return True
|
||||
|
||||
def get_client_ip_if_available(self) -> str:
|
||||
"""Logging helper. Return something useful when a client IP is not retrievable
|
||||
from a unix socket.
|
||||
|
||||
In practice, this returns the socket file path on a SynapseRequest if using a
|
||||
unix socket and the normal IP address for TCP sockets.
|
||||
|
||||
"""
|
||||
# getClientAddress().host returns a proper IP address for a TCP socket. But
|
||||
# unix sockets have no concept of IP addresses or ports and return a
|
||||
# UNIXAddress containing a 'None' value. In order to get something usable for
|
||||
# logs(where this is used) get the unix socket file. getHost() returns a
|
||||
# UNIXAddress containing a value of the socket file and has an instance
|
||||
# variable of 'name' encoded as a byte string containing the path we want.
|
||||
# Decode to utf-8 so it looks nice.
|
||||
if isinstance(self.getClientAddress(), UNIXAddress):
|
||||
return self.getHost().name.decode("utf-8")
|
||||
else:
|
||||
return self.getClientAddress().host
|
||||
|
||||
|
||||
class XForwardedForRequest(SynapseRequest):
|
||||
"""Request object which honours proxy headers
|
||||
|
||||
@@ -1,833 +0,0 @@
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2020-2023 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import datetime
|
||||
import errno
|
||||
import fnmatch
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
|
||||
from urllib.parse import urljoin, urlparse, urlsplit
|
||||
from urllib.request import urlopen
|
||||
|
||||
import attr
|
||||
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.error import DNSLookupError
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.http.client import SimpleHttpClient
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.media._base import FileInfo, get_filename_from_headers
|
||||
from synapse.media.media_storage import MediaStorage
|
||||
from synapse.media.oembed import OEmbedProvider
|
||||
from synapse.media.preview_html import decode_body, parse_html_to_open_graph
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.types import JsonDict, UserID
|
||||
from synapse.util import json_encoder
|
||||
from synapse.util.async_helpers import ObservableDeferred
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.media.media_repository import MediaRepository
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
OG_TAG_NAME_MAXLEN = 50
|
||||
OG_TAG_VALUE_MAXLEN = 1000
|
||||
|
||||
ONE_HOUR = 60 * 60 * 1000
|
||||
ONE_DAY = 24 * ONE_HOUR
|
||||
IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY
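# For reference: ONE_HOUR is 3,600,000 ms and ONE_DAY is 86,400,000 ms, so
# IMAGE_CACHE_EXPIRY_MS comes out to 172,800,000 ms, i.e. two days.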
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class DownloadResult:
|
||||
length: int
|
||||
uri: str
|
||||
response_code: int
|
||||
media_type: str
|
||||
download_name: Optional[str]
|
||||
expires: int
|
||||
etag: Optional[str]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class MediaInfo:
|
||||
"""
|
||||
Information parsed from downloading media being previewed.
|
||||
"""
|
||||
|
||||
# The Content-Type header of the response.
|
||||
media_type: str
|
||||
# The length (in bytes) of the downloaded media.
|
||||
media_length: int
|
||||
# The media filename, according to the server. This is parsed from the
|
||||
# returned headers, if possible.
|
||||
download_name: Optional[str]
|
||||
# The time of the preview.
|
||||
created_ts_ms: int
|
||||
# Information from the media storage provider about where the file is stored
|
||||
# on disk.
|
||||
filesystem_id: str
|
||||
filename: str
|
||||
# The URI being previewed.
|
||||
uri: str
|
||||
# The HTTP response code.
|
||||
response_code: int
|
||||
# The timestamp (in milliseconds) of when this preview expires.
|
||||
expires: int
|
||||
# The ETag header of the response.
|
||||
etag: Optional[str]
|
||||
|
||||
|
||||
class UrlPreviewer:
"""
Generates Open Graph (https://ogp.me/) responses (with some Matrix
specific additions) for a given URL.

When Synapse is asked to preview a URL it does the following:

1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
   config).
2. Checks the URL against an in-memory cache and returns the result if it exists. (This
   is also used to de-duplicate processing of multiple in-flight requests at once.)
3. Kicks off a background process to generate a preview:
   1. Checks URL and timestamp against the database cache and returns the result if it
      has not expired and was successful (a 2xx return code).
   2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it
      does, update the URL to download.
   3. Downloads the URL and stores it into a file via the media storage provider
      and saves the local media metadata.
   4. If the media is an image:
      1. Generates thumbnails.
      2. Generates an Open Graph response based on image properties.
   5. If the media is HTML:
      1. Decodes the HTML via the stored file.
      2. Generates an Open Graph response from the HTML.
      3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
         1. Downloads the URL and stores it into a file via the media storage provider
            and saves the local media metadata.
         2. Convert the oEmbed response to an Open Graph response.
         3. Override any Open Graph data from the HTML with data from oEmbed.
      4. If an image exists in the Open Graph response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   6. If the media is JSON and an oEmbed URL was found:
      1. Convert the oEmbed response to an Open Graph response.
      2. If a thumbnail or image is in the oEmbed response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   7. Stores the result in the database cache.
4. Returns the result.

If any additional request (e.g. from oEmbed autodiscovery, step 5.3, or
image thumbnailing, step 5.4 or 6.4) fails, the URL preview as a whole
does not fail. As much information as possible is returned.

The in-memory cache expires after 1 hour.

Expired entries in the database cache (and their associated media files) are
deleted every 10 seconds. The default expiration time is 1 hour from download.
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hs: "HomeServer",
|
||||
media_repo: "MediaRepository",
|
||||
media_storage: MediaStorage,
|
||||
):
|
||||
self.clock = hs.get_clock()
|
||||
self.filepaths = media_repo.filepaths
|
||||
self.max_spider_size = hs.config.media.max_spider_size
|
||||
self.server_name = hs.hostname
|
||||
self.store = hs.get_datastores().main
|
||||
self.client = SimpleHttpClient(
|
||||
hs,
|
||||
treq_args={"browser_like_redirects": True},
|
||||
ip_whitelist=hs.config.media.url_preview_ip_range_whitelist,
|
||||
ip_blacklist=hs.config.media.url_preview_ip_range_blacklist,
|
||||
use_proxy=True,
|
||||
)
|
||||
self.media_repo = media_repo
|
||||
self.primary_base_path = media_repo.primary_base_path
|
||||
self.media_storage = media_storage
|
||||
|
||||
self._oembed = OEmbedProvider(hs)
|
||||
|
||||
# We run the background jobs if we're the instance specified (or no
|
||||
# instance is specified, where we assume there is only one instance
|
||||
# serving media).
|
||||
instance_running_jobs = hs.config.media.media_instance_running_background_jobs
|
||||
self._worker_run_media_background_jobs = (
|
||||
instance_running_jobs is None
|
||||
or instance_running_jobs == hs.get_instance_name()
|
||||
)
|
||||
|
||||
self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist
|
||||
self.url_preview_accept_language = hs.config.media.url_preview_accept_language
|
||||
|
||||
# memory cache mapping urls to an ObservableDeferred returning
|
||||
# JSON-encoded OG metadata
|
||||
self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
|
||||
cache_name="url_previews",
|
||||
clock=self.clock,
|
||||
# don't spider URLs more often than once an hour
|
||||
expiry_ms=ONE_HOUR,
|
||||
)
|
||||
|
||||
if self._worker_run_media_background_jobs:
|
||||
self._cleaner_loop = self.clock.looping_call(
|
||||
self._start_expire_url_cache_data, 10 * 1000
|
||||
)
|
||||
|
||||
async def preview(self, url: str, user: UserID, ts: int) -> bytes:
|
||||
# XXX: we could move this into _do_preview if we wanted.
|
||||
url_tuple = urlsplit(url)
|
||||
for entry in self.url_preview_url_blacklist:
|
||||
match = True
|
||||
for attrib in entry:
|
||||
pattern = entry[attrib]
|
||||
value = getattr(url_tuple, attrib)
|
||||
logger.debug(
|
||||
"Matching attrib '%s' with value '%s' against pattern '%s'",
|
||||
attrib,
|
||||
value,
|
||||
pattern,
|
||||
)
|
||||
|
||||
if value is None:
|
||||
match = False
|
||||
continue
|
||||
|
||||
# Some attributes might not be parsed as strings by urlsplit (such as the
|
||||
# port, which is parsed as an int). Because we use match functions that
|
||||
# expect strings, we want to make sure that's what we give them.
|
||||
value_str = str(value)
|
||||
|
||||
if pattern.startswith("^"):
|
||||
if not re.match(pattern, value_str):
|
||||
match = False
|
||||
continue
|
||||
else:
|
||||
if not fnmatch.fnmatch(value_str, pattern):
|
||||
match = False
|
||||
continue
|
||||
if match:
|
||||
logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
|
||||
raise SynapseError(
|
||||
403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
|
||||
)
|
||||
|
||||
# the in-memory cache:
|
||||
# * ensures that only one request is active at a time
|
||||
# * takes load off the DB for the thundering herds
|
||||
# * also caches any failures (unlike the DB) so we don't keep
|
||||
# requesting the same endpoint
|
||||
|
||||
observable = self._cache.get(url)
|
||||
|
||||
if not observable:
|
||||
download = run_in_background(self._do_preview, url, user, ts)
|
||||
observable = ObservableDeferred(download, consumeErrors=True)
|
||||
self._cache[url] = observable
|
||||
else:
|
||||
logger.info("Returning cached response")
|
||||
|
||||
return await make_deferred_yieldable(observable.observe())
|
||||
|
||||
async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
|
||||
"""Check the db, and download the URL and build a preview
|
||||
|
||||
Args:
|
||||
url: The URL to preview.
|
||||
user: The user requesting the preview.
|
||||
ts: The timestamp requested for the preview.
|
||||
|
||||
Returns:
|
||||
json-encoded og data
|
||||
"""
|
||||
# check the URL cache in the DB (which will also provide us with
|
||||
# historical previews, if we have any)
|
||||
cache_result = await self.store.get_url_cache(url, ts)
|
||||
if (
|
||||
cache_result
|
||||
and cache_result["expires_ts"] > ts
|
||||
and cache_result["response_code"] / 100 == 2
|
||||
):
|
||||
# It may be stored as text in the database, not as bytes (such as
|
||||
# PostgreSQL). If so, encode it back before handing it on.
|
||||
og = cache_result["og"]
|
||||
if isinstance(og, str):
|
||||
og = og.encode("utf8")
|
||||
return og
|
||||
|
||||
# If this URL can be accessed via oEmbed, use that instead.
|
||||
url_to_download = url
|
||||
oembed_url = self._oembed.get_oembed_url(url)
|
||||
if oembed_url:
|
||||
url_to_download = oembed_url
|
||||
|
||||
media_info = await self._handle_url(url_to_download, user)
|
||||
|
||||
logger.debug("got media_info of '%s'", media_info)
|
||||
|
||||
# The number of milliseconds that the response should be considered valid.
|
||||
expiration_ms = media_info.expires
|
||||
author_name: Optional[str] = None
|
||||
|
||||
if _is_media(media_info.media_type):
|
||||
file_id = media_info.filesystem_id
|
||||
dims = await self.media_repo._generate_thumbnails(
|
||||
None, file_id, file_id, media_info.media_type, url_cache=True
|
||||
)
|
||||
|
||||
og = {
|
||||
"og:description": media_info.download_name,
|
||||
"og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}",
|
||||
"og:image:type": media_info.media_type,
|
||||
"matrix:image:size": media_info.media_length,
|
||||
}
|
||||
|
||||
if dims:
|
||||
og["og:image:width"] = dims["width"]
|
||||
og["og:image:height"] = dims["height"]
|
||||
else:
|
||||
logger.warning("Couldn't get dims for %s" % url)
|
||||
|
||||
# define our OG response for this media
|
||||
elif _is_html(media_info.media_type):
|
||||
# TODO: somehow stop a big HTML tree from exploding synapse's RAM
|
||||
|
||||
with open(media_info.filename, "rb") as file:
|
||||
body = file.read()
|
||||
|
||||
tree = decode_body(body, media_info.uri, media_info.media_type)
|
||||
if tree is not None:
|
||||
# Check if this HTML document points to oEmbed information and
|
||||
# defer to that.
|
||||
oembed_url = self._oembed.autodiscover_from_html(tree)
|
||||
og_from_oembed: JsonDict = {}
|
||||
if oembed_url:
|
||||
try:
|
||||
oembed_info = await self._handle_url(
|
||||
oembed_url, user, allow_data_urls=True
|
||||
)
|
||||
except Exception as e:
|
||||
# Fetching the oEmbed info failed, don't block the entire URL preview.
|
||||
logger.warning(
|
||||
"oEmbed fetch failed during URL preview: %s errored with %s",
|
||||
oembed_url,
|
||||
e,
|
||||
)
|
||||
else:
|
||||
(
|
||||
og_from_oembed,
|
||||
author_name,
|
||||
expiration_ms,
|
||||
) = await self._handle_oembed_response(
|
||||
url, oembed_info, expiration_ms
|
||||
)
|
||||
|
||||
# Parse Open Graph information from the HTML in case the oEmbed
|
||||
# response failed or is incomplete.
|
||||
og_from_html = parse_html_to_open_graph(tree)
|
||||
|
||||
# Compile the Open Graph response by using the scraped
|
||||
# information from the HTML and overlaying any information
|
||||
# from the oEmbed response.
|
||||
og = {**og_from_html, **og_from_oembed}
|
||||
|
||||
await self._precache_image_url(user, media_info, og)
|
||||
else:
|
||||
og = {}
|
||||
|
||||
elif oembed_url:
|
||||
# Handle the oEmbed information.
|
||||
og, author_name, expiration_ms = await self._handle_oembed_response(
|
||||
url, media_info, expiration_ms
|
||||
)
|
||||
await self._precache_image_url(user, media_info, og)
|
||||
|
||||
else:
|
||||
logger.warning("Failed to find any OG data in %s", url)
|
||||
og = {}
|
||||
|
||||
# If we don't have a title but we have author_name, copy it as
|
||||
# title
|
||||
if not og.get("og:title") and author_name:
|
||||
og["og:title"] = author_name
|
||||
|
||||
# filter out any stupidly long values
|
||||
keys_to_remove = []
|
||||
for k, v in og.items():
|
||||
# values can be numeric as well as strings, hence the cast to str
|
||||
if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN:
|
||||
logger.warning(
|
||||
"Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
|
||||
)
|
||||
keys_to_remove.append(k)
|
||||
for k in keys_to_remove:
|
||||
del og[k]
|
||||
|
||||
logger.debug("Calculated OG for %s as %s", url, og)
|
||||
|
||||
jsonog = json_encoder.encode(og)
|
||||
|
||||
# Cap the amount of time to consider a response valid.
|
||||
expiration_ms = min(expiration_ms, ONE_DAY)
|
||||
|
||||
# store OG in history-aware DB cache
|
||||
await self.store.store_url_cache(
|
||||
url,
|
||||
media_info.response_code,
|
||||
media_info.etag,
|
||||
media_info.created_ts_ms + expiration_ms,
|
||||
jsonog,
|
||||
media_info.filesystem_id,
|
||||
media_info.created_ts_ms,
|
||||
)
|
||||
|
||||
return jsonog.encode("utf8")
|
||||
|
||||
async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult:
|
||||
"""
|
||||
Fetches a remote URL and parses the headers.
|
||||
|
||||
Args:
|
||||
url: The URL to fetch.
|
||||
output_stream: The stream to write the content to.
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
Media length, URL downloaded, the HTTP response code,
|
||||
the media type, the downloaded file name, the number of
|
||||
milliseconds the result is valid for, the etag header.
|
||||
"""
|
||||
|
||||
try:
|
||||
logger.debug("Trying to get preview for url '%s'", url)
|
||||
length, headers, uri, code = await self.client.get_file(
|
||||
url,
|
||||
output_stream=output_stream,
|
||||
max_size=self.max_spider_size,
|
||||
headers={
|
||||
b"Accept-Language": self.url_preview_accept_language,
|
||||
# Use a custom user agent for the preview because some sites will only return
|
||||
# Open Graph metadata to crawler user agents. Omit the Synapse version
|
||||
# string to avoid leaking information.
|
||||
b"User-Agent": [
|
||||
"Synapse (bot; +https://github.com/matrix-org/synapse)"
|
||||
],
|
||||
},
|
||||
is_allowed_content_type=_is_previewable,
|
||||
)
|
||||
except SynapseError:
|
||||
# Pass SynapseErrors through directly, so that the servlet
|
||||
# handler will return a SynapseError to the client instead of
|
||||
# blank data or a 500.
|
||||
raise
|
||||
except DNSLookupError:
|
||||
# DNS lookup returned no results
|
||||
# Note: This will also be the case if one of the resolved IP
|
||||
# addresses is blacklisted
|
||||
raise SynapseError(
|
||||
502,
|
||||
"DNS resolution failure during URL preview generation",
|
||||
Codes.UNKNOWN,
|
||||
)
|
||||
except Exception as e:
|
||||
# FIXME: pass through 404s and other error messages nicely
|
||||
logger.warning("Error downloading %s: %r", url, e)
|
||||
|
||||
raise SynapseError(
|
||||
500,
|
||||
"Failed to download content: %s"
|
||||
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
||||
Codes.UNKNOWN,
|
||||
)
|
||||
|
||||
if b"Content-Type" in headers:
|
||||
media_type = headers[b"Content-Type"][0].decode("ascii")
|
||||
else:
|
||||
media_type = "application/octet-stream"
|
||||
|
||||
download_name = get_filename_from_headers(headers)
|
||||
|
||||
# FIXME: we should calculate a proper expiration based on the
|
||||
# Cache-Control and Expire headers. But for now, assume 1 hour.
|
||||
expires = ONE_HOUR
|
||||
etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
|
||||
|
||||
return DownloadResult(
|
||||
length, uri, code, media_type, download_name, expires, etag
|
||||
)
|
||||
|
||||
async def _parse_data_url(
|
||||
self, url: str, output_stream: BinaryIO
|
||||
) -> DownloadResult:
|
||||
"""
|
||||
Parses a data: URL.
|
||||
|
||||
Args:
|
||||
url: The URL to parse.
|
||||
output_stream: The stream to write the content to.
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
Media length, URL downloaded, the HTTP response code,
|
||||
the media type, the downloaded file name, the number of
|
||||
milliseconds the result is valid for, the etag header.
|
||||
"""
|
||||
|
||||
try:
|
||||
logger.debug("Trying to parse data url '%s'", url)
|
||||
with urlopen(url) as url_info:
|
||||
# TODO Can this be more efficient?
|
||||
output_stream.write(url_info.read())
|
||||
except Exception as e:
|
||||
logger.warning("Error parsing data: URL %s: %r", url, e)
|
||||
|
||||
raise SynapseError(
|
||||
500,
|
||||
"Failed to parse data URL: %s"
|
||||
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
||||
Codes.UNKNOWN,
|
||||
)
|
||||
|
||||
return DownloadResult(
|
||||
# Read back the length that has been written.
|
||||
length=output_stream.tell(),
|
||||
uri=url,
|
||||
# If it was parsed, consider this a 200 OK.
|
||||
response_code=200,
|
||||
# urlopen shoves the media-type from the data URL into the content type
|
||||
# header object.
|
||||
media_type=url_info.headers.get_content_type(),
|
||||
# Some features are not supported by data: URLs.
|
||||
download_name=None,
|
||||
expires=ONE_HOUR,
|
||||
etag=None,
|
||||
)
|
||||
|
||||
async def _handle_url(
|
||||
self, url: str, user: UserID, allow_data_urls: bool = False
|
||||
) -> MediaInfo:
|
||||
"""
|
||||
Fetches content from a URL and parses the result to generate a MediaInfo.
|
||||
|
||||
It uses the media storage provider to persist the fetched content and
|
||||
stores the mapping into the database.
|
||||
|
||||
Args:
|
||||
url: The URL to fetch.
|
||||
user: The user who has requested this URL.
|
||||
allow_data_urls: True if data URLs should be allowed.
|
||||
|
||||
Returns:
|
||||
A MediaInfo object describing the fetched content.
|
||||
"""
|
||||
|
||||
# TODO: we should probably honour robots.txt... except in practice
|
||||
# we're most likely being explicitly triggered by a human rather than a
|
||||
# bot, so are we really a robot?
|
||||
|
||||
file_id = datetime.date.today().isoformat() + "_" + random_string(16)
|
||||
|
||||
file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
|
||||
|
||||
with self.media_storage.store_into_file(file_info) as (f, fname, finish):
|
||||
if url.startswith("data:"):
|
||||
if not allow_data_urls:
|
||||
raise SynapseError(
|
||||
500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
|
||||
)
|
||||
|
||||
download_result = await self._parse_data_url(url, f)
|
||||
else:
|
||||
download_result = await self._download_url(url, f)
|
||||
|
||||
await finish()
|
||||
|
||||
try:
|
||||
time_now_ms = self.clock.time_msec()
|
||||
|
||||
await self.store.store_local_media(
|
||||
media_id=file_id,
|
||||
media_type=download_result.media_type,
|
||||
time_now_ms=time_now_ms,
|
||||
upload_name=download_result.download_name,
|
||||
media_length=download_result.length,
|
||||
user_id=user,
|
||||
url_cache=url,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error handling downloaded %s: %r", url, e)
|
||||
# TODO: we really ought to delete the downloaded file in this
|
||||
# case, since we won't have recorded it in the db, and will
|
||||
# therefore not expire it.
|
||||
raise
|
||||
|
||||
return MediaInfo(
|
||||
media_type=download_result.media_type,
|
||||
media_length=download_result.length,
|
||||
download_name=download_result.download_name,
|
||||
created_ts_ms=time_now_ms,
|
||||
filesystem_id=file_id,
|
||||
filename=fname,
|
||||
uri=download_result.uri,
|
||||
response_code=download_result.response_code,
|
||||
expires=download_result.expires,
|
||||
etag=download_result.etag,
|
||||
)
|
||||
|
||||
async def _precache_image_url(
|
||||
self, user: UserID, media_info: MediaInfo, og: JsonDict
|
||||
) -> None:
|
||||
"""
|
||||
Pre-cache the image (if one exists) for posterity
|
||||
|
||||
Args:
|
||||
user: The user requesting the preview.
|
||||
media_info: The media being previewed.
|
||||
og: The Open Graph dictionary. This is modified with image information.
|
||||
"""
|
||||
# If there's no image or it is blank, there's nothing to do.
|
||||
if "og:image" not in og:
|
||||
return
|
||||
|
||||
# Remove the raw image URL, this will be replaced with an MXC URL, if successful.
|
||||
image_url = og.pop("og:image")
|
||||
if not image_url:
|
||||
return
|
||||
|
||||
# The image URL from the HTML might be relative to the previewed page,
|
||||
# convert it to an URL which can be requested directly.
|
||||
url_parts = urlparse(image_url)
|
||||
if url_parts.scheme != "data":
|
||||
image_url = urljoin(media_info.uri, image_url)
|
||||
|
||||
# FIXME: it might be cleaner to use the same flow as the main /preview_url
|
||||
# request itself and benefit from the same caching etc. But for now we
|
||||
# just rely on the caching on the master request to speed things up.
|
||||
try:
|
||||
image_info = await self._handle_url(image_url, user, allow_data_urls=True)
|
||||
except Exception as e:
|
||||
# Pre-caching the image failed, don't block the entire URL preview.
|
||||
logger.warning(
|
||||
"Pre-caching image failed during URL preview: %s errored with %s",
|
||||
image_url,
|
||||
e,
|
||||
)
|
||||
return
|
||||
|
||||
if _is_media(image_info.media_type):
|
||||
# TODO: make sure we don't choke on white-on-transparent images
|
||||
file_id = image_info.filesystem_id
|
||||
dims = await self.media_repo._generate_thumbnails(
|
||||
None, file_id, file_id, image_info.media_type, url_cache=True
|
||||
)
|
||||
if dims:
|
||||
og["og:image:width"] = dims["width"]
|
||||
og["og:image:height"] = dims["height"]
|
||||
else:
|
||||
logger.warning("Couldn't get dims for %s", image_url)
|
||||
|
||||
og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
|
||||
og["og:image:type"] = image_info.media_type
|
||||
og["matrix:image:size"] = image_info.media_length
|
||||
|
||||
async def _handle_oembed_response(
|
||||
self, url: str, media_info: MediaInfo, expiration_ms: int
|
||||
) -> Tuple[JsonDict, Optional[str], int]:
|
||||
"""
|
||||
Parse the downloaded oEmbed info.
|
||||
|
||||
Args:
|
||||
url: The URL which is being previewed (not the one which was
|
||||
requested).
|
||||
media_info: The media being previewed.
|
||||
expiration_ms: The length of time, in milliseconds, the media is valid for.
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
The Open Graph dictionary, if the oEmbed info can be parsed.
|
||||
The author name if it could be retrieved from oEmbed.
|
||||
The (possibly updated) length of time, in milliseconds, the media is valid for.
|
||||
"""
|
||||
# If JSON was not returned, there's nothing to do.
|
||||
if not _is_json(media_info.media_type):
|
||||
return {}, None, expiration_ms
|
||||
|
||||
with open(media_info.filename, "rb") as file:
|
||||
body = file.read()
|
||||
|
||||
oembed_response = self._oembed.parse_oembed_response(url, body)
|
||||
open_graph_result = oembed_response.open_graph_result
|
||||
|
||||
# Use the cache age from the oEmbed result, if one was given.
|
||||
if open_graph_result and oembed_response.cache_age is not None:
|
||||
expiration_ms = oembed_response.cache_age
|
||||
|
||||
return open_graph_result, oembed_response.author_name, expiration_ms
|
||||
|
||||
def _start_expire_url_cache_data(self) -> Deferred:
|
||||
return run_as_background_process(
|
||||
"expire_url_cache_data", self._expire_url_cache_data
|
||||
)
|
||||
|
||||
async def _expire_url_cache_data(self) -> None:
|
||||
"""Clean up expired url cache content, media and thumbnails."""
|
||||
|
||||
assert self._worker_run_media_background_jobs
|
||||
|
||||
now = self.clock.time_msec()
|
||||
|
||||
logger.debug("Running url preview cache expiry")
|
||||
|
||||
def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
|
||||
"""Attempt to remove the given chain of parent directories
|
||||
|
||||
Args:
|
||||
dirs: The list of directory paths to delete, with children appearing
|
||||
before their parents.
|
||||
"""
|
||||
for dir in dirs:
|
||||
try:
|
||||
os.rmdir(dir)
|
||||
except FileNotFoundError:
|
||||
# Already deleted, continue with deleting the rest
|
||||
pass
|
||||
except OSError as e:
|
||||
# Failed, skip deleting the rest of the parent dirs
|
||||
if e.errno != errno.ENOTEMPTY:
|
||||
logger.warning(
|
||||
"Failed to remove media directory while clearing url preview cache: %r: %s",
|
||||
dir,
|
||||
e,
|
||||
)
|
||||
break
|
||||
|
||||
# First we delete expired url cache entries
|
||||
media_ids = await self.store.get_expired_url_cache(now)
|
||||
|
||||
removed_media = []
|
||||
for media_id in media_ids:
|
||||
fname = self.filepaths.url_cache_filepath(media_id)
|
||||
try:
|
||||
os.remove(fname)
|
||||
except FileNotFoundError:
|
||||
pass # If the path doesn't exist, meh
|
||||
except OSError as e:
|
||||
logger.warning(
|
||||
"Failed to remove media while clearing url preview cache: %r: %s",
|
||||
media_id,
|
||||
e,
|
||||
)
|
||||
continue
|
||||
|
||||
removed_media.append(media_id)
|
||||
|
||||
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
||||
try_remove_parent_dirs(dirs)
|
||||
|
||||
await self.store.delete_url_cache(removed_media)
|
||||
|
||||
if removed_media:
|
||||
logger.debug(
|
||||
"Deleted %d entries from url preview cache", len(removed_media)
|
||||
)
|
||||
else:
|
||||
logger.debug("No entries removed from url preview cache")
|
||||
|
||||
# Now we delete old images associated with the url cache.
|
||||
# These may be cached for a bit on the client (i.e., they
|
||||
# may have a room open with a preview url thing open).
|
||||
# So we wait a couple of days before deleting, just in case.
|
||||
expire_before = now - IMAGE_CACHE_EXPIRY_MS
|
||||
media_ids = await self.store.get_url_cache_media_before(expire_before)
|
||||
|
||||
removed_media = []
|
||||
for media_id in media_ids:
|
||||
fname = self.filepaths.url_cache_filepath(media_id)
|
||||
try:
|
||||
os.remove(fname)
|
||||
except FileNotFoundError:
|
||||
pass # If the path doesn't exist, meh
|
||||
except OSError as e:
|
||||
logger.warning(
|
||||
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
||||
)
|
||||
continue
|
||||
|
||||
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
||||
try_remove_parent_dirs(dirs)
|
||||
|
||||
thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
|
||||
try:
|
||||
shutil.rmtree(thumbnail_dir)
|
||||
except FileNotFoundError:
|
||||
pass # If the path doesn't exist, meh
|
||||
except OSError as e:
|
||||
logger.warning(
|
||||
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
||||
)
|
||||
continue
|
||||
|
||||
removed_media.append(media_id)
|
||||
|
||||
dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
|
||||
# Note that one of the directories to be deleted has already been
|
||||
# removed by the `rmtree` above.
|
||||
try_remove_parent_dirs(dirs)
|
||||
|
||||
await self.store.delete_url_cache_media(removed_media)
|
||||
|
||||
if removed_media:
|
||||
logger.debug("Deleted %d media from url preview cache", len(removed_media))
|
||||
else:
|
||||
logger.debug("No media removed from url preview cache")
|
||||
|
||||
|
||||
def _is_media(content_type: str) -> bool:
|
||||
return content_type.lower().startswith("image/")
|
||||
|
||||
|
||||
def _is_html(content_type: str) -> bool:
|
||||
content_type = content_type.lower()
|
||||
return content_type.startswith("text/html") or content_type.startswith(
|
||||
"application/xhtml"
|
||||
)
|
||||
|
||||
|
||||
def _is_json(content_type: str) -> bool:
|
||||
return content_type.lower().startswith("application/json")
|
||||
|
||||
|
||||
def _is_previewable(content_type: str) -> bool:
|
||||
"""Returns True for content types for which we will perform URL preview and False
|
||||
otherwise."""
|
||||
|
||||
return _is_html(content_type) or _is_media(content_type) or _is_json(content_type)
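# For reference, some concrete examples of the checks above (values are
# ordinary Content-Type headers, not taken from this changeset):
#   _is_previewable("text/html; charset=utf-8")  -> True   (HTML)
#   _is_previewable("image/png")                 -> True   (media)
#   _is_previewable("application/json")          -> True   (oEmbed JSON)
#   _is_previewable("video/mp4")                 -> False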
|
||||
@@ -39,49 +39,9 @@ from twisted.web.resource import Resource
|
||||
from synapse.api import errors
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.presence_router import (
|
||||
GET_INTERESTED_USERS_CALLBACK,
|
||||
GET_USERS_FOR_STATES_CALLBACK,
|
||||
PresenceRouter,
|
||||
)
|
||||
from synapse.events.spamcheck import (
|
||||
CHECK_EVENT_FOR_SPAM_CALLBACK,
|
||||
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
|
||||
CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
|
||||
CHECK_USERNAME_FOR_SPAM_CALLBACK,
|
||||
SHOULD_DROP_FEDERATED_EVENT_CALLBACK,
|
||||
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK,
|
||||
USER_MAY_CREATE_ROOM_CALLBACK,
|
||||
USER_MAY_INVITE_CALLBACK,
|
||||
USER_MAY_JOIN_ROOM_CALLBACK,
|
||||
USER_MAY_PUBLISH_ROOM_CALLBACK,
|
||||
USER_MAY_SEND_3PID_INVITE_CALLBACK,
|
||||
SpamChecker,
|
||||
)
|
||||
from synapse.events.third_party_rules import (
|
||||
CHECK_CAN_DEACTIVATE_USER_CALLBACK,
|
||||
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
|
||||
CHECK_EVENT_ALLOWED_CALLBACK,
|
||||
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
|
||||
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
|
||||
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
|
||||
ON_CREATE_ROOM_CALLBACK,
|
||||
ON_NEW_EVENT_CALLBACK,
|
||||
ON_PROFILE_UPDATE_CALLBACK,
|
||||
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
|
||||
ON_THREEPID_BIND_CALLBACK,
|
||||
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
|
||||
)
|
||||
from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK
|
||||
from synapse.handlers.auth import (
|
||||
CHECK_3PID_AUTH_CALLBACK,
|
||||
CHECK_AUTH_CALLBACK,
|
||||
GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK,
|
||||
GET_USERNAME_FOR_REGISTRATION_CALLBACK,
|
||||
IS_3PID_ALLOWED_CALLBACK,
|
||||
ON_LOGGED_OUT_CALLBACK,
|
||||
AuthHandler,
|
||||
)
|
||||
from synapse.events.presence_router import PresenceRouter
|
||||
from synapse.events.spamcheck import SpamChecker
|
||||
from synapse.handlers.auth import AuthHandler
|
||||
from synapse.handlers.device import DeviceHandler
|
||||
from synapse.handlers.push_rules import RuleSpec, check_actions
|
||||
from synapse.http.client import SimpleHttpClient
|
||||
@@ -98,6 +58,9 @@ from synapse.logging.context import (
|
||||
run_in_background,
|
||||
)
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.module_api.callbacks.account_data_callbacks import (
|
||||
ON_ACCOUNT_DATA_UPDATED_CALLBACK,
|
||||
)
|
||||
from synapse.module_api.callbacks.account_validity_callbacks import (
|
||||
IS_USER_EXPIRED_CALLBACK,
|
||||
ON_LEGACY_ADMIN_REQUEST,
|
||||
@@ -105,13 +68,52 @@ from synapse.module_api.callbacks.account_validity_callbacks import (
|
||||
ON_LEGACY_SEND_MAIL_CALLBACK,
|
||||
ON_USER_REGISTRATION_CALLBACK,
|
||||
)
|
||||
from synapse.rest.client.login import LoginResponse
|
||||
from synapse.storage import DataStore
|
||||
from synapse.storage.background_updates import (
|
||||
from synapse.module_api.callbacks.background_updater_callbacks import (
|
||||
DEFAULT_BATCH_SIZE_CALLBACK,
|
||||
MIN_BATCH_SIZE_CALLBACK,
|
||||
ON_UPDATE_CALLBACK,
|
||||
)
|
||||
from synapse.module_api.callbacks.password_auth_provider_callbacks import (
|
||||
CHECK_3PID_AUTH_CALLBACK,
|
||||
CHECK_AUTH_CALLBACK,
|
||||
GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK,
|
||||
GET_USERNAME_FOR_REGISTRATION_CALLBACK,
|
||||
IS_3PID_ALLOWED_CALLBACK,
|
||||
ON_LOGGED_OUT_CALLBACK,
|
||||
)
|
||||
from synapse.module_api.callbacks.presence_router_callbacks import (
|
||||
GET_INTERESTED_USERS_CALLBACK,
|
||||
GET_USERS_FOR_STATES_CALLBACK,
|
||||
)
|
||||
from synapse.module_api.callbacks.spam_checker_callbacks import (
|
||||
CHECK_EVENT_FOR_SPAM_CALLBACK,
|
||||
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
|
||||
CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
|
||||
CHECK_USERNAME_FOR_SPAM_CALLBACK,
|
||||
SHOULD_DROP_FEDERATED_EVENT_CALLBACK,
|
||||
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK,
|
||||
USER_MAY_CREATE_ROOM_CALLBACK,
|
||||
USER_MAY_INVITE_CALLBACK,
|
||||
USER_MAY_JOIN_ROOM_CALLBACK,
|
||||
USER_MAY_PUBLISH_ROOM_CALLBACK,
|
||||
USER_MAY_SEND_3PID_INVITE_CALLBACK,
|
||||
)
|
||||
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
|
||||
CHECK_CAN_DEACTIVATE_USER_CALLBACK,
|
||||
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
|
||||
CHECK_EVENT_ALLOWED_CALLBACK,
|
||||
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
|
||||
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
|
||||
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
|
||||
ON_CREATE_ROOM_CALLBACK,
|
||||
ON_NEW_EVENT_CALLBACK,
|
||||
ON_PROFILE_UPDATE_CALLBACK,
|
||||
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
|
||||
ON_THREEPID_BIND_CALLBACK,
|
||||
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
|
||||
)
|
||||
from synapse.rest.client.login import LoginResponse
|
||||
from synapse.storage import DataStore
|
||||
from synapse.storage.database import DatabasePool, LoggingTransaction
|
||||
from synapse.storage.databases.main.roommember import ProfileInfo
|
||||
from synapse.types import (
|
||||
@@ -271,12 +273,6 @@ class ModuleApi:
|
||||
self._public_room_list_manager = PublicRoomListManager(hs)
|
||||
self._account_data_manager = AccountDataManager(hs)
|
||||
|
||||
self._spam_checker = hs.get_spam_checker()
|
||||
self._third_party_event_rules = hs.get_third_party_event_rules()
|
||||
self._password_auth_provider = hs.get_password_auth_provider()
|
||||
self._presence_router = hs.get_presence_router()
|
||||
self._account_data_handler = hs.get_account_data_handler()
|
||||
|
||||
#################################################################################
|
||||
# The following methods should only be called during the module's initialisation.
|
||||
|
||||
@@ -305,7 +301,7 @@ class ModuleApi:
|
||||
|
||||
Added in Synapse v1.37.0.
|
||||
"""
|
||||
return self._spam_checker.register_callbacks(
|
||||
return self._callbacks.spam_checker.register_callbacks(
|
||||
check_event_for_spam=check_event_for_spam,
|
||||
should_drop_federated_event=should_drop_federated_event,
|
||||
user_may_join_room=user_may_join_room,
|
||||
@@ -370,7 +366,7 @@ class ModuleApi:
|
||||
|
||||
Added in Synapse v1.39.0.
|
||||
"""
|
||||
return self._third_party_event_rules.register_third_party_rules_callbacks(
|
||||
return self._callbacks.third_party_event_rules.register_callbacks(
|
||||
check_event_allowed=check_event_allowed,
|
||||
on_create_room=on_create_room,
|
||||
check_threepid_can_be_invited=check_threepid_can_be_invited,
|
||||
@@ -395,7 +391,7 @@ class ModuleApi:
|
||||
|
||||
Added in Synapse v1.42.0.
|
||||
"""
|
||||
return self._presence_router.register_presence_router_callbacks(
|
||||
return self._callbacks.presence_router.register_callbacks(
|
||||
get_users_for_states=get_users_for_states,
|
||||
get_interested_users=get_interested_users,
|
||||
)
|
||||
@@ -420,7 +416,7 @@ class ModuleApi:
|
||||
|
||||
Added in Synapse v1.46.0.
|
||||
"""
|
||||
return self._password_auth_provider.register_password_auth_provider_callbacks(
|
||||
return self._callbacks.password_auth_provider.register_callbacks(
|
||||
check_3pid_auth=check_3pid_auth,
|
||||
on_logged_out=on_logged_out,
|
||||
is_3pid_allowed=is_3pid_allowed,
|
||||
@@ -441,12 +437,11 @@ class ModuleApi:
|
||||
Added in Synapse v1.49.0.
|
||||
"""
|
||||
|
||||
for db in self._hs.get_datastores().databases:
|
||||
db.updates.register_update_controller_callbacks(
|
||||
on_update=on_update,
|
||||
default_batch_size=default_batch_size,
|
||||
min_batch_size=min_batch_size,
|
||||
)
|
||||
self._callbacks.background_updater.register_callbacks(
|
||||
on_update=on_update,
|
||||
default_batch_size=default_batch_size,
|
||||
min_batch_size=min_batch_size,
|
||||
)
|
||||
|
||||
def register_account_data_callbacks(
|
||||
self,
|
||||
@@ -457,7 +452,7 @@ class ModuleApi:
|
||||
|
||||
Added in Synapse 1.57.0.
|
||||
"""
|
||||
return self._account_data_handler.register_module_callbacks(
|
||||
return self._callbacks.account_data.register_callbacks(
|
||||
on_account_data_updated=on_account_data_updated,
|
||||
)
|
||||
|
||||
|
||||
@@ -12,11 +12,25 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.module_api.callbacks.account_validity_callbacks import (
|
||||
AccountValidityModuleApiCallbacks,
|
||||
)
|
||||
from .account_data_callbacks import AccountDataModuleApiCallbacks
|
||||
from .account_validity_callbacks import AccountValidityModuleApiCallbacks
|
||||
from .background_updater_callbacks import BackgroundUpdaterModuleApiCallbacks
|
||||
from .password_auth_provider_callbacks import PasswordAuthProviderModuleApiCallbacks
|
||||
from .presence_router_callbacks import PresenceRouterModuleApiCallbacks
|
||||
from .spam_checker_callbacks import SpamCheckerModuleApiCallbacks
|
||||
from .third_party_event_rules_callbacks import ThirdPartyEventRulesModuleApiCallbacks
|
||||
|
||||
__all__ = [
"ModuleApiCallbacks",
]


class ModuleApiCallbacks:
def __init__(self) -> None:
self.account_data = AccountDataModuleApiCallbacks()
self.account_validity = AccountValidityModuleApiCallbacks()
self.background_updater = BackgroundUpdaterModuleApiCallbacks()
self.password_auth_provider = PasswordAuthProviderModuleApiCallbacks()
self.presence_router = PresenceRouterModuleApiCallbacks()
self.spam_checker = SpamCheckerModuleApiCallbacks()
self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks()

35
synapse/module_api/callbacks/account_data_callbacks.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2021, 2023 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from typing import Awaitable, Callable, List, Optional

from synapse.types import JsonDict

ON_ACCOUNT_DATA_UPDATED_CALLBACK = Callable[
[str, Optional[str], str, JsonDict], Awaitable
]


class AccountDataModuleApiCallbacks:
def __init__(self) -> None:
self.on_account_data_updated_callbacks: List[
ON_ACCOUNT_DATA_UPDATED_CALLBACK
] = []

def register_callbacks(
self, on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None
) -> None:
"""Register callbacks from modules."""
if on_account_data_updated is not None:
self.on_account_data_updated_callbacks.append(on_account_data_updated)
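# An illustrative module-side sketch, not part of this changeset: a coroutine
# matching ON_ACCOUNT_DATA_UPDATED_CALLBACK. The parameter names are
# assumptions read off the type alias above (user ID, optional room ID,
# account data type, content).
async def on_account_data_updated(
    user_id: str, room_id: Optional[str], account_data_type: str, content: JsonDict
) -> None:
    # A real module would act on the update here; this only shows the shape of
    # the callback that register_callbacks() expects.
    pass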
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
# to `/_synapse/client/account_validity`. See `register_callbacks` below.
# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]

54
synapse/module_api/callbacks/background_updater_callbacks.py
Normal file
@@ -0,0 +1,54 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import AsyncContextManager, Awaitable, Callable, Optional

logger = logging.getLogger(__name__)

ON_UPDATE_CALLBACK = Callable[[str, str, bool], AsyncContextManager[int]]
DEFAULT_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
MIN_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]


class BackgroundUpdaterModuleApiCallbacks:
    def __init__(self) -> None:
        self.on_update_callback: Optional[ON_UPDATE_CALLBACK] = None
        self.default_batch_size_callback: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None
        self.min_batch_size_callback: Optional[MIN_BATCH_SIZE_CALLBACK] = None

    def register_callbacks(
        self,
        on_update: ON_UPDATE_CALLBACK,
        default_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None,
        min_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None,
    ) -> None:
        """Register callbacks from a module for each hook."""
        if self.on_update_callback is not None:
            logger.warning(
                "More than one module tried to register callbacks for controlling"
                " background updates. Only the callbacks registered by the first module"
                " (in order of appearance in Synapse's configuration file) that tried to"
                " do so will be called."
            )

            return

        self.on_update_callback = on_update

        if default_batch_size is not None:
            self.default_batch_size_callback = default_batch_size

        if min_batch_size is not None:
            self.min_batch_size_callback = min_batch_size

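A rough, self-contained sketch of the "first module wins" behaviour implemented above, using only the class from this file; the callbacks themselves are hypothetical.

from contextlib import asynccontextmanager
from typing import AsyncIterator


@asynccontextmanager
async def on_update(update_name: str, database_name: str, one_shot: bool) -> AsyncIterator[int]:
    # Hypothetical controller: allow the updater to process 100 items per iteration.
    yield 100


async def default_batch_size(update_name: str, database_name: str) -> int:
    return 100  # hypothetical fixed batch size


callbacks = BackgroundUpdaterModuleApiCallbacks()
callbacks.register_callbacks(on_update=on_update, default_batch_size=default_batch_size)
# A second registration only logs a warning and is otherwise ignored.
callbacks.register_callbacks(on_update=on_update)
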
138
synapse/module_api/callbacks/password_auth_provider_callbacks.py
Normal file
@@ -0,0 +1,138 @@
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2019 - 2020, 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple

from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.module_api import LoginResponse

logger = logging.getLogger(__name__)


CHECK_3PID_AUTH_CALLBACK = Callable[
    [str, str, str],
    Awaitable[
        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
    ],
]
ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable]
CHECK_AUTH_CALLBACK = Callable[
    [str, str, JsonDict],
    Awaitable[
        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
    ],
]
GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[
    [JsonDict, JsonDict],
    Awaitable[Optional[str]],
]
GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK = Callable[
    [JsonDict, JsonDict],
    Awaitable[Optional[str]],
]
IS_3PID_ALLOWED_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]


class PasswordAuthProviderModuleApiCallbacks:
    def __init__(self) -> None:
        # Mapping from login type to login parameters
        self.supported_login_types: Dict[str, Tuple[str, ...]] = {}

        self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = []
        self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = []
        self.get_username_for_registration_callbacks: List[
            GET_USERNAME_FOR_REGISTRATION_CALLBACK
        ] = []
        self.get_displayname_for_registration_callbacks: List[
            GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK
        ] = []
        self.is_3pid_allowed_callbacks: List[IS_3PID_ALLOWED_CALLBACK] = []

        # Mapping from login type to auth checker callbacks
        self.auth_checker_callbacks: Dict[str, List[CHECK_AUTH_CALLBACK]] = {}

    def register_callbacks(
        self,
        check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None,
        on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None,
        is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None,
        auth_checkers: Optional[
            Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
        ] = None,
        get_username_for_registration: Optional[
            GET_USERNAME_FOR_REGISTRATION_CALLBACK
        ] = None,
        get_displayname_for_registration: Optional[
            GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK
        ] = None,
    ) -> None:
        # Register check_3pid_auth callback
        if check_3pid_auth is not None:
            self.check_3pid_auth_callbacks.append(check_3pid_auth)

        # register on_logged_out callback
        if on_logged_out is not None:
            self.on_logged_out_callbacks.append(on_logged_out)

        if auth_checkers is not None:
            # register a new supported login_type
            # Iterate through all of the types being registered
            for (login_type, fields), callback in auth_checkers.items():
                # Note: fields may be empty here. This would allow a modules auth checker to
                # be called with just 'login_type' and no password or other secrets

                # Need to check that all the field names are strings or may get nasty errors later
                for f in fields:
                    if not isinstance(f, str):
                        raise RuntimeError(
                            "A module tried to register support for login type: %s with parameters %s"
                            " but all parameter names must be strings"
                            % (login_type, fields)
                        )

                # 2 modules supporting the same login type must expect the same fields
                # e.g. 1 can't expect "pass" if the other expects "password"
                # so throw an exception if that happens
                if login_type not in self.supported_login_types.get(login_type, []):
                    self.supported_login_types[login_type] = fields
                else:
                    fields_currently_supported = self.supported_login_types.get(
                        login_type
                    )
                    if fields_currently_supported != fields:
                        raise RuntimeError(
                            "A module tried to register support for login type: %s with parameters %s"
                            " but another module had already registered support for that type with parameters %s"
                            % (login_type, fields, fields_currently_supported)
                        )

                # Add the new method to the list of auth_checker_callbacks for this login type
                self.auth_checker_callbacks.setdefault(login_type, []).append(callback)

        if get_username_for_registration is not None:
            self.get_username_for_registration_callbacks.append(
                get_username_for_registration,
            )

        if get_displayname_for_registration is not None:
            self.get_displayname_for_registration_callbacks.append(
                get_displayname_for_registration,
            )

        if is_3pid_allowed is not None:
            self.is_3pid_allowed_callbacks.append(is_3pid_allowed)

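A short sketch (not part of the diff) of how the auth_checkers mapping above is keyed: each entry maps a (login_type, fields) tuple to a checker. The login type and checker are hypothetical.

from typing import Optional

from synapse.types import JsonDict


async def check_my_login(username: str, login_type: str, login_dict: JsonDict) -> Optional[tuple]:
    # Hypothetical checker: reject every attempt (None means authentication failed).
    return None


callbacks = PasswordAuthProviderModuleApiCallbacks()
callbacks.register_callbacks(
    auth_checkers={("com.example.my_login", ("secret",)): check_my_login},
)
# Registering the same login type again with different fields raises RuntimeError.
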
122
synapse/module_api/callbacks/presence_router_callbacks.py
Normal file
@@ -0,0 +1,122 @@
# Copyright 2021, 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Set,
    TypeVar,
    Union,
)

from typing_extensions import ParamSpec

from synapse.api.presence import UserPresenceState
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
    from synapse.server import HomeServer

GET_USERS_FOR_STATES_CALLBACK = Callable[
    [Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
]
# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]


P = ParamSpec("P")
R = TypeVar("R")


def load_legacy_presence_router(hs: "HomeServer") -> None:
    """Wrapper that loads a presence router module configured using the old
    configuration, and registers the hooks they implement.
    """

    if hs.config.server.presence_router_module_class is None:
        return

    module = hs.config.server.presence_router_module_class
    config = hs.config.server.presence_router_config
    api = hs.get_module_api()

    presence_router = module(config=config, module_api=api)

    # The known hooks. If a module implements a method which name appears in this set,
    # we'll want to register it.
    presence_router_methods = {
        "get_users_for_states",
        "get_interested_users",
    }

    # All methods that the module provides should be async, but this wasn't enforced
    # in the old module system, so we wrap them if needed
    def async_wrapper(
        f: Optional[Callable[P, R]]
    ) -> Optional[Callable[P, Awaitable[R]]]:
        # f might be None if the callback isn't implemented by the module. In this
        # case we don't want to register a callback at all so we return None.
        if f is None:
            return None

        def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
            # Assertion required because mypy can't prove we won't change `f`
            # back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            return maybe_awaitable(f(*args, **kwargs))

        return run

    # Register the hooks through the module API.
    hooks: Dict[str, Optional[Callable[..., Any]]] = {
        hook: async_wrapper(getattr(presence_router, hook, None))
        for hook in presence_router_methods
    }

    api.register_presence_router_callbacks(**hooks)


class PresenceRouterModuleApiCallbacks:
    def __init__(self) -> None:
        # Initially there are no callbacks
        self.get_users_for_states_callbacks: List[GET_USERS_FOR_STATES_CALLBACK] = []
        self.get_interested_users_callbacks: List[GET_INTERESTED_USERS_CALLBACK] = []

    def register_callbacks(
        self,
        get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
        get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
    ) -> None:
        # PresenceRouter modules are required to implement both of these methods
        # or neither of them as they are assumed to act in a complementary manner
        paired_methods = [get_users_for_states, get_interested_users]
        if paired_methods.count(None) == 1:
            raise RuntimeError(
                "PresenceRouter modules must register neither or both of the paired callbacks: "
                "[get_users_for_states, get_interested_users]"
            )

        # Append the methods provided to the lists of callbacks
        if get_users_for_states is not None:
            self.get_users_for_states_callbacks.append(get_users_for_states)

        if get_interested_users is not None:
            self.get_interested_users_callbacks.append(get_interested_users)

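A minimal sketch of the paired-callback rule enforced above, using only the class from this file; both callbacks below are hypothetical no-op routers.

from typing import Dict, Iterable, Set, Union

from synapse.api.presence import UserPresenceState


async def get_users_for_states(
    state_updates: Iterable[UserPresenceState],
) -> Dict[str, Set[UserPresenceState]]:
    return {}  # hypothetical: route no extra presence to anyone


async def get_interested_users(user_id: str) -> Union[Set[str], str]:
    return set()  # hypothetical: interested in no extra users


callbacks = PresenceRouterModuleApiCallbacks()
# Registering only one of the pair would raise RuntimeError; both (or neither) is accepted.
callbacks.register_callbacks(
    get_users_for_states=get_users_for_states,
    get_interested_users=get_interested_users,
)
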
373
synapse/module_api/callbacks/spam_checker_callbacks.py
Normal file
@@ -0,0 +1,373 @@
# Copyright 2017 New Vector Ltd
# Copyright 2019, 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Collection,
    List,
    Optional,
    Tuple,
    Union,
)

# `Literal` appears with Python 3.8.
from typing_extensions import Literal

import synapse
from synapse.api.errors import Codes
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import JsonDict, RoomAlias, UserProfile
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
    import synapse.events
    import synapse.server

logger = logging.getLogger(__name__)


CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
    ["synapse.events.EventBase"],
    Awaitable[
        Union[
            str,
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
    ["synapse.events.EventBase"],
    Awaitable[Union[bool, str]],
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[
    [str, str, bool],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
USER_MAY_INVITE_CALLBACK = Callable[
    [str, str, str],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
    [str, str, str, str],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[
    [str],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
    [str, RoomAlias],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
    [str, str],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]]
LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
    [
        Optional[dict],
        Optional[str],
        Collection[Tuple[str, str]],
    ],
    Awaitable[RegistrationBehaviour],
]
CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
    [
        Optional[dict],
        Optional[str],
        Collection[Tuple[str, str]],
        Optional[str],
    ],
    Awaitable[RegistrationBehaviour],
]
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
    [ReadableFileWrapper, FileInfo],
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
    ],
]


def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
    """Wrapper that loads spam checkers configured using the old configuration, and
    registers the spam checker hooks they implement.
    """
    spam_checkers: List[Any] = []
    api = hs.get_module_api()
    for module, config in hs.config.spamchecker.spam_checkers:
        # Older spam checkers don't accept the `api` argument, so we
        # try and detect support.
        spam_args = inspect.getfullargspec(module)
        if "api" in spam_args.args:
            spam_checkers.append(module(config=config, api=api))
        else:
            spam_checkers.append(module(config=config))

    # The known spam checker hooks. If a spam checker module implements a method
    # which name appears in this set, we'll want to register it.
    spam_checker_methods = {
        "check_event_for_spam",
        "user_may_invite",
        "user_may_create_room",
        "user_may_create_room_alias",
        "user_may_publish_room",
        "check_username_for_spam",
        "check_registration_for_spam",
        "check_media_file_for_spam",
    }

    for spam_checker in spam_checkers:
        # Methods on legacy spam checkers might not be async, so we wrap them around a
        # wrapper that will call maybe_awaitable on the result.
        def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
            # f might be None if the callback isn't implemented by the module. In this
            # case we don't want to register a callback at all so we return None.
            if f is None:
                return None

            wrapped_func = f

            if f.__name__ == "check_registration_for_spam":
                checker_args = inspect.signature(f)
                if len(checker_args.parameters) == 3:
                    # Backwards compatibility; some modules might implement a hook that
                    # doesn't expect a 4th argument. In this case, wrap it in a function
                    # that gives it only 3 arguments and drops the auth_provider_id on
                    # the floor.
                    def wrapper(
                        email_threepid: Optional[dict],
                        username: Optional[str],
                        request_info: Collection[Tuple[str, str]],
                        auth_provider_id: Optional[str],
                    ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
                        # Assertion required because mypy can't prove we won't
                        # change `f` back to `None`. See
                        # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
                        assert f is not None

                        return f(
                            email_threepid,
                            username,
                            request_info,
                        )

                    wrapped_func = wrapper
                elif len(checker_args.parameters) != 4:
                    raise RuntimeError(
                        "Bad signature for callback check_registration_for_spam",
                    )

            def run(*args: Any, **kwargs: Any) -> Awaitable:
                # Assertion required because mypy can't prove we won't change `f`
                # back to `None`. See
                # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
                assert wrapped_func is not None

                return maybe_awaitable(wrapped_func(*args, **kwargs))

            return run

        # Register the hooks through the module API.
        hooks = {
            hook: async_wrapper(getattr(spam_checker, hook, None))
            for hook in spam_checker_methods
        }

        api.register_spam_checker_callbacks(**hooks)


class SpamCheckerModuleApiCallbacks:
    def __init__(self) -> None:
        self.check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
        self.should_drop_federated_event_callbacks: List[
            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
        ] = []
        self.user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
        self.user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
        self.user_may_send_3pid_invite_callbacks: List[
            USER_MAY_SEND_3PID_INVITE_CALLBACK
        ] = []
        self.user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
        self.user_may_create_room_alias_callbacks: List[
            USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
        ] = []
        self.user_may_publish_room_callbacks: List[USER_MAY_PUBLISH_ROOM_CALLBACK] = []
        self.check_username_for_spam_callbacks: List[
            CHECK_USERNAME_FOR_SPAM_CALLBACK
        ] = []
        self.check_registration_for_spam_callbacks: List[
            CHECK_REGISTRATION_FOR_SPAM_CALLBACK
        ] = []
        self.check_media_file_for_spam_callbacks: List[
            CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK
        ] = []

    def register_callbacks(
        self,
        check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
        should_drop_federated_event: Optional[
            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
        ] = None,
        user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
        user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
        user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
        user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
        user_may_create_room_alias: Optional[
            USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
        ] = None,
        user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None,
        check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None,
        check_registration_for_spam: Optional[
            CHECK_REGISTRATION_FOR_SPAM_CALLBACK
        ] = None,
        check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
    ) -> None:
        """Register callbacks from module for each hook."""
        if check_event_for_spam is not None:
            self.check_event_for_spam_callbacks.append(check_event_for_spam)

        if should_drop_federated_event is not None:
            self.should_drop_federated_event_callbacks.append(
                should_drop_federated_event
            )

        if user_may_join_room is not None:
            self.user_may_join_room_callbacks.append(user_may_join_room)

        if user_may_invite is not None:
            self.user_may_invite_callbacks.append(user_may_invite)

        if user_may_send_3pid_invite is not None:
            self.user_may_send_3pid_invite_callbacks.append(
                user_may_send_3pid_invite,
            )

        if user_may_create_room is not None:
            self.user_may_create_room_callbacks.append(user_may_create_room)

        if user_may_create_room_alias is not None:
            self.user_may_create_room_alias_callbacks.append(
                user_may_create_room_alias,
            )

        if user_may_publish_room is not None:
            self.user_may_publish_room_callbacks.append(user_may_publish_room)

        if check_username_for_spam is not None:
            self.check_username_for_spam_callbacks.append(check_username_for_spam)

        if check_registration_for_spam is not None:
            self.check_registration_for_spam_callbacks.append(
                check_registration_for_spam,
            )

        if check_media_file_for_spam is not None:
            self.check_media_file_for_spam_callbacks.append(check_media_file_for_spam)

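For illustration only: a minimal sketch of a spam-checker hook that matches the USER_MAY_JOIN_ROOM_CALLBACK type above, registered directly on the class from this file. The policy shown is hypothetical.

from synapse.api.errors import Codes


async def user_may_join_room(user_id: str, room_id: str, is_invited: bool):
    # Hypothetical policy: allow invited users, flag everyone else.
    if is_invited:
        return "NOT_SPAM"
    return Codes.FORBIDDEN


callbacks = SpamCheckerModuleApiCallbacks()
callbacks.register_callbacks(user_may_join_room=user_may_join_room)
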
@@ -0,0 +1,238 @@
# Copyright 2019, 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

CHECK_EVENT_ALLOWED_CALLBACK = Callable[
    [EventBase, StateMap[EventBase]], Awaitable[Tuple[bool, Optional[dict]]]
]
ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable]
CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[
    [str, str, StateMap[EventBase]], Awaitable[bool]
]
CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
    [str, StateMap[EventBase], str], Awaitable[bool]
]
ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable]
ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]


def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
    """Wrapper that loads a third party event rules module configured using the old
    configuration, and registers the hooks they implement.
    """
    if hs.config.thirdpartyrules.third_party_event_rules is None:
        return

    module, config = hs.config.thirdpartyrules.third_party_event_rules

    api = hs.get_module_api()
    third_party_rules = module(config=config, module_api=api)

    # The known hooks. If a module implements a method which name appears in this set,
    # we'll want to register it.
    third_party_event_rules_methods = {
        "check_event_allowed",
        "on_create_room",
        "check_threepid_can_be_invited",
        "check_visibility_can_be_modified",
    }

    def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
        # f might be None if the callback isn't implemented by the module. In this
        # case we don't want to register a callback at all so we return None.
        if f is None:
            return None

        # We return a separate wrapper for these methods because, in order to wrap them
        # correctly, we need to await its result. Therefore it doesn't make a lot of
        # sense to make it go through the run() wrapper.
        if f.__name__ == "check_event_allowed":
            # We need to wrap check_event_allowed because its old form would return either
            # a boolean or a dict, but now we want to return the dict separately from the
            # boolean.
            async def wrap_check_event_allowed(
                event: EventBase,
                state_events: StateMap[EventBase],
            ) -> Tuple[bool, Optional[dict]]:
                # Assertion required because mypy can't prove we won't change
                # `f` back to `None`. See
                # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
                assert f is not None

                res = await f(event, state_events)
                if isinstance(res, dict):
                    return True, res
                else:
                    return res, None

            return wrap_check_event_allowed

        if f.__name__ == "on_create_room":
            # We need to wrap on_create_room because its old form would return a boolean
            # if the room creation is denied, but now we just want it to raise an
            # exception.
            async def wrap_on_create_room(
                requester: Requester, config: dict, is_requester_admin: bool
            ) -> None:
                # Assertion required because mypy can't prove we won't change
                # `f` back to `None`. See
                # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
                assert f is not None

                res = await f(requester, config, is_requester_admin)
                if res is False:
                    raise SynapseError(
                        403,
                        "Room creation forbidden with these parameters",
                    )

            return wrap_on_create_room

        def run(*args: Any, **kwargs: Any) -> Awaitable:
            # Assertion required because mypy can't prove we won't change `f`
            # back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            return maybe_awaitable(f(*args, **kwargs))

        return run

    # Register the hooks through the module API.
    hooks = {
        hook: async_wrapper(getattr(third_party_rules, hook, None))
        for hook in third_party_event_rules_methods
    }

    api.register_third_party_rules_callbacks(**hooks)


class ThirdPartyEventRulesModuleApiCallbacks:
    def __init__(self) -> None:
        self.check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = []
        self.on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = []
        self.check_threepid_can_be_invited_callbacks: List[
            CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
        ] = []
        self.check_visibility_can_be_modified_callbacks: List[
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = []
        self.on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
        self.check_can_shutdown_room_callbacks: List[
            CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
        ] = []
        self.check_can_deactivate_user_callbacks: List[
            CHECK_CAN_DEACTIVATE_USER_CALLBACK
        ] = []
        self.on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
        self.on_user_deactivation_status_changed_callbacks: List[
            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
        ] = []
        self.on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = []
        self.on_add_user_third_party_identifier_callbacks: List[
            ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
        ] = []
        self.on_remove_user_third_party_identifier_callbacks: List[
            ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
        ] = []

    def register_callbacks(
        self,
        check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None,
        on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None,
        check_threepid_can_be_invited: Optional[
            CHECK_THREEPID_CAN_BE_INVITED_CALLBACK
        ] = None,
        check_visibility_can_be_modified: Optional[
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = None,
        on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
        on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
        on_user_deactivation_status_changed: Optional[
            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
        ] = None,
        on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
        on_add_user_third_party_identifier: Optional[
            ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
        ] = None,
        on_remove_user_third_party_identifier: Optional[
            ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
        ] = None,
    ) -> None:
        """Register callbacks from modules for each hook."""
        if check_event_allowed is not None:
            self.check_event_allowed_callbacks.append(check_event_allowed)

        if on_create_room is not None:
            self.on_create_room_callbacks.append(on_create_room)

        if check_threepid_can_be_invited is not None:
            self.check_threepid_can_be_invited_callbacks.append(
                check_threepid_can_be_invited,
            )

        if check_visibility_can_be_modified is not None:
            self.check_visibility_can_be_modified_callbacks.append(
                check_visibility_can_be_modified,
            )

        if on_new_event is not None:
            self.on_new_event_callbacks.append(on_new_event)

        if check_can_shutdown_room is not None:
            self.check_can_shutdown_room_callbacks.append(check_can_shutdown_room)

        if check_can_deactivate_user is not None:
            self.check_can_deactivate_user_callbacks.append(check_can_deactivate_user)

        if on_profile_update is not None:
            self.on_profile_update_callbacks.append(on_profile_update)

        if on_user_deactivation_status_changed is not None:
            self.on_user_deactivation_status_changed_callbacks.append(
                on_user_deactivation_status_changed,
            )

        if on_threepid_bind is not None:
            self.on_threepid_bind_callbacks.append(on_threepid_bind)

        if on_add_user_third_party_identifier is not None:
            self.on_add_user_third_party_identifier_callbacks.append(
                on_add_user_third_party_identifier
            )

        if on_remove_user_third_party_identifier is not None:
            self.on_remove_user_third_party_identifier_callbacks.append(
                on_remove_user_third_party_identifier
            )

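A short sketch of the (bool, Optional[dict]) contract that the check_event_allowed wrapper above normalises legacy modules to, registered on the class from this file; the rule shown is hypothetical.

from typing import Optional, Tuple

from synapse.events import EventBase
from synapse.types import StateMap


async def check_event_allowed(
    event: EventBase, state_events: StateMap[EventBase]
) -> Tuple[bool, Optional[dict]]:
    # Hypothetical rule: allow every event unchanged (None means "no replacement dict").
    return True, None


callbacks = ThirdPartyEventRulesModuleApiCallbacks()
callbacks.register_callbacks(check_event_allowed=check_event_allowed)
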
@@ -103,7 +103,7 @@ class PusherConfig:

    id: Optional[str]
    user_name: str

    access_token: Optional[int]
    profile_tag: str
    kind: str
    app_id: str
@@ -119,11 +119,6 @@ class PusherConfig:
    enabled: bool
    device_id: Optional[str]

    # XXX(quenting): The access_token is not persisted anymore for new pushers, but we
    # keep it when reading from the database, so that we don't get stale pushers
    # while the "set_device_id_for_pushers" background update is running.
    access_token: Optional[int]

    def as_dict(self) -> Dict[str, Any]:
        """Information that can be retrieved about a pusher after creation."""
        return {

@@ -149,7 +149,7 @@ class Mailer:
        await self.send_email(
            email_address,
            self.email_subjects.password_reset
            % {"server_name": self.hs.config.server.server_name, "app": self.app_name},
            % {"server_name": self.hs.config.server.server_name},
            template_vars,
        )

Some files were not shown because too many files have changed in this diff.