Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-07 01:20:16 +00:00.

Compare commits: `anoa/docke`...`devon/lock` (199 commits)
Commit SHA1s in this range:

126f0c3587, e519ee230b, ac1bf682ff, a0b70473fc, 95a85b1129, 3d8535b1de, 628351b98d, 8f27b3af07, 579f4ac1cd, c53999dab8,
b41a9ebb38, 6ec5e13ec9, 148e93576e, 56ed412839, 9c5d08fff8, 90a6bd01c2, aa07a01452, 8364c01a2b, e27808f306, 048c1ac7f6,
ca290d325c, 0a31cf18cd, 48db0c2d6c, 24c4d82aeb, 3fda8d3b67, 5f15a549d7, 6cefbc6852, fd3ec6435e, 39bd6e2c16, 5c736cd2af,
e70e8d132c, 48334fbc40, b4fd694ce3, e2d757f62d, aab3672037, d0677dca39, e34fd1228d, beea39f000, fa320c4fcb, 22c2add9c0,
60f596b4d8, 1143e14479, c199ede287, 9fb7333a7c, a0a4a36891, 49fcda31f6, b3ba501c52, 6306de8e16, b5267678d2, ebc21a8c67,
e5a53819fc, 66b24d3d00, 2b59e738ee, b1d030a107, 7c2284b2f2, d69c00b5a1, 2d23250da7, 234d07eb09, bd9a1079bc, 3eb92369ca,
09f377fa52, f1b0f9a4ef, f1ecf46647, 57bf44941e, 3d60a58ad6, 8208186e3c, 29d586311d, 512c9efcb3, 35c361c0d9, 95853c5f31,
eb019c03c4, eedab12e6d, 483602efb2, ac429050bc, daa783f16c, 6c4037dcf3, 737f6c73f7, ed6edc17d0, 5b0873516c, 5da7081197,
5cf74c2da0, adce8a0111, 790ce14e46, ecbc0b740c, 0db5d247f8, 02d09e3f0c, b90ad26ebc, a00d0b3d0e, 45ca6392f4, 05d58b86ac,
23b626f2e6, abf44ad324, 657dd5151e, 6f689d452c, 650492ed4d, b257c7ab19, fe3d88b833, b64a4e5fbb, 4b7154c585, d82e1ed357,
4daa533e82, f3fd6852ac, d648c8ce3f, 190c400a83, e5d3bfba30, 9b2ae62d20, a89b697209, a82f5f206f, 6a909aade2, d80cd57c54,
59ad4b18fc, a58f09acc7, cee9da0da5, a9c4d1c8ac, 8c653e1dd6, cd7d90bd28, 02aa7adf4c, 3943d2fde7, 93cc955051, 4587decd67,
4c67d20af7, 80e39fd834, 573bdbc824, 79c02cada0, 81b080f7a2, 84ec15c47e, 0202e5f210, f73edbe4d2, ec4d136965, ddd1d79d03,
d0a474d312, 8291aa8fd7, 1092a35a2a, c5e89f5fae, e918f683d4, 4efd1056ca, 0f32408c80, 9d837daa8a, d72843056b, e80dad5fa9,
97284689ea, c812a79422, 850ff14613, e0fdb862cb, 73dc05c993, bfb197c596, f387f47a6a, a4c503674f, 2637b26cfe, db59067e78,
7feb07c3e9, 54e0086abd, 9916932e98, f4943b875b, 92fcca8ed7, c486ec8bc2, 20fc9fcc33, 2f41f6d947, f377cee7ec, cacd4fd7bd,
c7a1d0aa1a, c92639df21, d0fc1e904a, 77eafd47df, 2a321bac35, eda735e4bb, e1f5da65e1, a4438c9bc1, 9266ba72b5, 61aadb158f,
75698a3e53, 46bd7e136d, eac170b21b, 211c31dbd7, 361bdafb87, 1c2b18a704, 2c9ed5e510, 9c0a3963bc, 0932c77539, 5580a820ae,
541a009564, b5493899c5, da7d71e2a2, c705beebf7, 47fe6df013, 034d472688, 0c429fae1d, 2e5fe3f187, af59a99933, 7987d5e638,
3ae80b0de4, 5c781b578d, 418fbba8de, 6d65c3944b, 330f170c0e, bf03361c86, 3e750ab0d8, 9cd3545bca, 83513b75f7
@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 # First calculate the various trial jobs.
 #
 # For PRs, we only run each type of test with the oldest Python version supported (which
-# is Python 3.8 right now)
+# is Python 3.9 right now)
 
 trial_sqlite_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "all",
     }
@@ -53,14 +53,14 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
+        for version in ("3.10", "3.11", "3.12", "3.13")
     )
 
 trial_postgres_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "postgres",
-        "postgres-version": "11",
+        "postgres-version": "13",
         "extras": "all",
     }
 ]
@@ -77,7 +77,7 @@ if not IS_PR:
 
 trial_no_extra_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "",
     }
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
 
 # First calculate the various sytest jobs.
 #
-# For each type of test we only run on focal on PRs
+# For each type of test we only run on bullseye on PRs
 
 
 sytest_tests = [
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "postgres",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "multi-postgres",
         "workers": "workers",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
        "postgres": "multi-postgres",
        "workers": "workers",
        "reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
     sytest_tests.extend(
         [
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "reactor": "asyncio",
             },
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "postgres": "postgres",
                 "reactor": "asyncio",
             },

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `focal` container; it
+# this script is run by GitHub Actions in a plain `jammy` container; it
 # - installs the minimal system requirements, and poetry;
 # - patches the project definition file to refer to old versions only;
 # - creates a venv with these old versions using poetry; and finally
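The matrix-calculation script above hands these job dicts to GitHub Actions as JSON step outputs. As a rough illustration of that hand-off — the real script's `set_output` helper may be defined differently — the mechanism looks something like this:

```python
import json
import os

def set_output(key: str, value: str) -> None:
    # GitHub Actions reads step outputs from the file named in $GITHUB_OUTPUT
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={value}\n")

# One SQLite trial job, matching the PR matrix after this change.
trial_sqlite_tests = [
    {"python-version": "3.9", "database": "sqlite", "extras": "all"}
]
set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```

The workflow side can then feed this output into a job's `strategy.matrix` via `fromJson(...)`.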
.github/workflows/docker.yml (vendored):

@@ -14,7 +14,7 @@ permissions:
   id-token: write # needed for signing the images with GitHub OIDC Token
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - name: Set up QEMU
         id: qemu
.github/workflows/docs-pr-netlify.yaml (vendored):

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
+        uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
.github/workflows/latest_deps.yml (vendored):

@@ -132,9 +132,9 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - sytest-tag: focal
+          - sytest-tag: bullseye

-          - sytest-tag: focal
+          - sytest-tag: bullseye
             postgres: postgres
             workers: workers
             redis: redis
.github/workflows/release-artifacts.yml (vendored):

@@ -91,10 +91,19 @@ jobs:
           rm -rf /tmp/.buildx-cache
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
 
+      - name: Artifact name
+        id: artifact-name
+        # We can't have colons in the upload name of the artifact, so we convert
+        # e.g. `debian:sid` to `sid`.
+        env:
+          DISTRO: ${{ matrix.distro }}
+        run: |
+          echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
+
       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+        uses: actions/upload-artifact@v4
         with:
-          name: debs
+          name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
           path: debs/*
 
   build-wheels:
@@ -102,7 +111,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-12]
+        os: [ubuntu-22.04, macos-13]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,9 +121,9 @@ jobs:
       exclude:
         # Don't build macos wheels on PR CI.
         - is_pr: true
-          os: "macos-12"
+          os: "macos-13"
         # Don't build aarch64 wheels on mac.
-        - os: "macos-12"
+        - os: "macos-13"
           arch: aarch64
         # Don't build aarch64 wheels on PR CI.
         - is_pr: true
@@ -144,7 +153,7 @@ jobs:
 
       - name: Only build a single wheel on PR
         if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
 
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
@@ -156,9 +165,9 @@ jobs:
           CARGO_NET_GIT_FETCH_WITH_CLI: true
           CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
 
-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@v4
         with:
-          name: Wheel
+          name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
           path: ./wheelhouse/*.whl
 
   build-sdist:
@@ -177,7 +186,7 @@ jobs:
       - name: Build sdist
         run: python -m build --sdist
 
-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@v4
         with:
           name: Sdist
           path: dist/*.tar.gz
@@ -194,17 +203,23 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
+        uses: actions/download-artifact@v4
      - name: Build a tarball for the debs
-        run: tar -cvJf debs.tar.xz debs
+        # We need to merge all the debs uploads into one folder, then compress
+        # that.
+        run: |
+          mkdir debs
+          mv debs*/* debs/
+          tar -cvJf debs.tar.xz debs
      - name: Attach to release
-        uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
+        # Pinned to work around https://github.com/softprops/action-gh-release/issues/445
+        uses: softprops/action-gh-release@v0.1.15
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          files: |
            Sdist/*
-            Wheel/*
+            Wheel*/*
            debs.tar.xz
          # if it's not already published, keep the release as a draft.
          draft: true
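The new `artifact-name` step relies on the shell's `${DISTRO#*:}` expansion, which strips everything up to and including the first colon. A minimal Python equivalent, assuming the same `debian:sid`-style values as the workflow's distro matrix:

```python
def artifact_name(distro: str) -> str:
    # "debian:sid" -> "sid"; a value with no colon is returned unchanged,
    # mirroring the behaviour of ${DISTRO#*:} in POSIX shells.
    return distro.split(":", 1)[-1]

assert artifact_name("debian:sid") == "sid"
assert artifact_name("ubuntu:jammy") == "jammy"
```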
.github/workflows/tests.yml (vendored):

@@ -397,7 +397,7 @@ jobs:
     needs:
       - linting-done
       - changes
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v4
 
@@ -409,12 +409,12 @@ jobs:
       #   their build dependencies
       - run: |
           sudo apt-get -qq update
-          sudo apt-get -qq install build-essential libffi-dev python-dev \
+          sudo apt-get -qq install build-essential libffi-dev python3-dev \
             libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
 
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.8'
+          python-version: '3.9'
 
       - name: Prepare old deps
         if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.8"]
+        python-version: ["pypy-3.9"]
         extras: ["all"]
 
     steps:
@@ -580,11 +580,11 @@ jobs:
     strategy:
       matrix:
         include:
-          - python-version: "3.8"
-            postgres-version: "11"
+          - python-version: "3.9"
+            postgres-version: "13"
 
-          - python-version: "3.11"
-            postgres-version: "15"
+          - python-version: "3.13"
+            postgres-version: "17"
 
     services:
       postgres:
.github/workflows/twisted_trunk.yml (vendored):

@@ -99,11 +99,11 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
-      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+      # We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
       # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
       # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
       # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
-      image: matrixdotorg/sytest-synapse:focal
+      image: matrixdotorg/sytest-synapse:bullseye
       volumes:
         - ${{ github.workspace }}:/src
CHANGES.md: file diff suppressed because it is too large.
Cargo.lock (generated):

@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.90"
+version = "1.0.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
 
 [[package]]
 name = "arc-swap"
@@ -35,12 +35,6 @@ version = "0.21.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
 
-[[package]]
-name = "bitflags"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
-
 [[package]]
 name = "blake2"
 version = "0.10.6"
@@ -67,9 +61,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 
 [[package]]
 name = "bytes"
-version = "1.7.2"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
+checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
 
 [[package]]
 name = "cfg-if"
@@ -130,10 +124,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
 dependencies = [
  "cfg-if",
- "js-sys",
  "libc",
  "wasi",
- "wasm-bindgen",
 ]
 
 [[package]]
@@ -162,9 +154,9 @@ dependencies = [
 
 [[package]]
 name = "heck"
-version = "0.4.1"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
 [[package]]
 name = "hex"
@@ -174,9 +166,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
 name = "http"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
 dependencies = [
  "bytes",
  "fnv",
@@ -222,21 +214,11 @@ version = "0.2.154"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
 
-[[package]]
-name = "lock_api"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
-dependencies = [
- "autocfg",
- "scopeguard",
-]
-
 [[package]]
 name = "log"
-version = "0.4.22"
+version = "0.4.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
 
 [[package]]
 name = "memchr"
@@ -265,29 +247,6 @@ version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
 
-[[package]]
-name = "parking_lot"
-version = "0.12.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
-dependencies = [
- "lock_api",
- "parking_lot_core",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.9.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
-dependencies = [
- "cfg-if",
- "libc",
- "redox_syscall",
- "smallvec",
- "windows-targets",
-]
-
 [[package]]
 name = "portable-atomic"
 version = "1.6.0"
@@ -302,25 +261,25 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.82"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
+checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "pyo3"
-version = "0.21.2"
+version = "0.23.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8"
+checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc"
 dependencies = [
  "anyhow",
  "cfg-if",
  "indoc",
  "libc",
  "memoffset",
- "parking_lot",
+ "once_cell",
  "portable-atomic",
  "pyo3-build-config",
  "pyo3-ffi",
@@ -330,9 +289,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-build-config"
-version = "0.21.2"
+version = "0.23.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50"
+checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7"
 dependencies = [
  "once_cell",
  "target-lexicon",
@@ -340,9 +299,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-ffi"
-version = "0.21.2"
+version = "0.23.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403"
+checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d"
 dependencies = [
  "libc",
  "pyo3-build-config",
@@ -350,9 +309,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-log"
-version = "0.10.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c"
+checksum = "3eb421dc86d38d08e04b927b02424db480be71b777fa3a56f32e2f2a3a1a3b08"
 dependencies = [
  "arc-swap",
  "log",
@@ -361,9 +320,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-macros"
-version = "0.21.2"
+version = "0.23.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c"
+checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7"
 dependencies = [
  "proc-macro2",
  "pyo3-macros-backend",
@@ -373,9 +332,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-macros-backend"
-version = "0.21.2"
+version = "0.23.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c"
+checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -386,9 +345,9 @@ dependencies = [
 
 [[package]]
 name = "pythonize"
-version = "0.21.1"
+version = "0.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c"
+checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff"
 dependencies = [
  "pyo3",
  "serde",
@@ -433,20 +392,11 @@ dependencies = [
  "getrandom",
 ]
 
-[[package]]
-name = "redox_syscall"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
-dependencies = [
- "bitflags",
-]
-
 [[package]]
 name = "regex"
-version = "1.11.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -477,26 +427,20 @@ version = "1.0.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
 
-[[package]]
-name = "scopeguard"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
-
 [[package]]
 name = "serde"
-version = "1.0.210"
+version = "1.0.217"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
+checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.210"
+version = "1.0.217"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
+checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,9 +449,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.132"
+version = "1.0.137"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
+checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
 dependencies = [
  "itoa",
  "memchr",
@@ -537,12 +481,6 @@ dependencies = [
  "digest",
 ]
 
-[[package]]
-name = "smallvec"
-version = "1.13.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
-
 [[package]]
 name = "subtle"
 version = "2.5.0"
@@ -551,9 +489,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 
 [[package]]
 name = "syn"
-version = "2.0.61"
+version = "2.0.85"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9"
+checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -598,11 +536,10 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
 
 [[package]]
 name = "ulid"
-version = "1.1.3"
+version = "1.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
+checksum = "f294bff79170ed1c5633812aff1e565c35d993a36e757f9bc0accf5eec4e6045"
 dependencies = [
  "getrandom",
  "rand",
  "web-time",
 ]
@@ -694,67 +631,3 @@ dependencies = [
  "js-sys",
  "wasm-bindgen",
 ]
-
-[[package]]
-name = "windows-targets"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
-dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_gnullvm",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
-]
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
-
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
@@ -1,8 +1,10 @@
 # A build script for poetry that adds the rust extension.
 
+import itertools
 import os
 from typing import Any, Dict
 
+from packaging.specifiers import SpecifierSet
 from setuptools_rust import Binding, RustExtension
 
 
@@ -14,6 +16,8 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
         target="synapse.synapse_rust",
         path=cargo_toml_path,
         binding=Binding.PyO3,
+        # This flag is a no-op in the latest versions. Instead, we need to
+        # specify this in the `bdist_wheel` config below.
         py_limited_api=True,
         # We force always building in release mode, as we can't tell the
         # difference between using `poetry` in development vs production.
@@ -21,3 +25,18 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
     )
     setup_kwargs.setdefault("rust_extensions", []).append(extension)
     setup_kwargs["zip_safe"] = False
+
+    # We lookup the minimum supported python version by looking at
+    # `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first python
+    # version that matches. We then convert that into the `py_limited_api` form,
+    # e.g. cp39 for python 3.9.
+    py_limited_api: str
+    python_bounds = SpecifierSet(setup_kwargs["python_requires"])
+    for minor_version in itertools.count(start=8):
+        if f"3.{minor_version}.0" in python_bounds:
+            py_limited_api = f"cp3{minor_version}"
+            break
+
+    setup_kwargs.setdefault("options", {}).setdefault("bdist_wheel", {})[
+        "py_limited_api"
+    ] = py_limited_api
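The new lookup loop can be exercised on its own. A small demonstration, assuming a `python_requires` of `>=3.9.0,<4.0.0` (the bound implied by the CI changes above):

```python
import itertools

from packaging.specifiers import SpecifierSet

bounds = SpecifierSet(">=3.9.0,<4.0.0")
for minor_version in itertools.count(start=8):
    # SpecifierSet supports `in` checks against version strings.
    if f"3.{minor_version}.0" in bounds:
        print(f"cp3{minor_version}")  # prints "cp39": 3.8.0 is excluded, 3.9.0 matches
        break
```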
@@ -1 +0,0 @@
|
||||
Add a test for downloading and thumbnailing a CMYK JPEG.
|
||||
@@ -1 +0,0 @@
|
||||
Include the destination in the error of 'Destination mismatch' on federation requests.
|
||||
@@ -1,2 +0,0 @@
|
||||
Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
|
||||
the config option `run_background_tasks_on`.
|
||||
@@ -1 +0,0 @@
|
||||
Fix detection when the built Rust library was outdated when using source installations.
|
||||
1
changelog.d/18000.bugfix
Normal file
1
changelog.d/18000.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Add rate limit `rc_presence.per_user`. This prevents load from excessive presence updates sent by clients via sync api. Also rate limit `/_matrix/client/v3/presence` as per the spec. Contributed by @rda0.
|
||||
1
changelog.d/18073.bugfix
Normal file
1
changelog.d/18073.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Deactivated users will no longer automatically accept an invite when `auto_accept_invites` is enabled.
|
||||
1
changelog.d/18075.bugfix
Normal file
1
changelog.d/18075.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Fix join being denied after being invited over federation. Also fixes other out-of-band membership transitions.
|
||||
2
changelog.d/18089.bugfix
Normal file
2
changelog.d/18089.bugfix
Normal file
@@ -0,0 +1,2 @@
|
||||
Updates contributed `docker-compose.yml` file to PostgreSQL v15, as v12 is no longer supported by Synapse.
|
||||
Contributed by @maxkratz.
|
||||
1
changelog.d/18109.misc
Normal file
1
changelog.d/18109.misc
Normal file
@@ -0,0 +1 @@
|
||||
Increase the length of the generated `nonce` parameter when perfoming OIDC logins to comply with the TI-Messenger spec.
|
||||
1
changelog.d/18112.bugfix
Normal file
1
changelog.d/18112.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Raise an error if someone is using an incorrect suffix in a config duration string.
|
||||
1
changelog.d/18119.bugfix
Normal file
1
changelog.d/18119.bugfix
Normal file
@@ -0,0 +1 @@
|
||||
Fix a bug where the [Delete Room Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#version-2-new-version) would fail if the `block` parameter was set to `true` and a worker other than the main process was configured to handle background tasks.
|
||||
1
changelog.d/18124.misc
Normal file
1
changelog.d/18124.misc
Normal file
@@ -0,0 +1 @@
|
||||
Add log message when worker lock timeouts get large.
|
||||
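The 18112 fix concerns config duration strings such as `5m` or `2h`. A hedged sketch of the kind of validation involved — the unit table matches Synapse's documented suffixes, but the helper itself is illustrative, not Synapse's actual code:

```python
# Milliseconds per documented duration suffix (s, m, h, d, w, y).
DURATION_UNITS = {
    "s": 1_000,
    "m": 60 * 1_000,
    "h": 60 * 60 * 1_000,
    "d": 24 * 60 * 60 * 1_000,
    "w": 7 * 24 * 60 * 60 * 1_000,
    "y": 365 * 24 * 60 * 60 * 1_000,
}

def parse_duration_ms(value: str) -> int:
    if value.isdigit():
        return int(value)  # a bare number is taken as milliseconds
    number, suffix = value[:-1], value[-1]
    if suffix not in DURATION_UNITS or not number.isdigit():
        # Previously a bad suffix could be silently misinterpreted;
        # after the fix, it is rejected outright.
        raise ValueError(f"invalid duration string: {value!r}")
    return int(number) * DURATION_UNITS[suffix]
```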
@@ -245,7 +245,7 @@ class SynapseCmd(cmd.Cmd):
 
         if "flows" not in json_res:
             print("Failed to find any login flows.")
-            defer.returnValue(False)
+            return False
 
         flow = json_res["flows"][0]  # assume first is the one we want.
         if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
@@ -254,8 +254,8 @@ class SynapseCmd(cmd.Cmd):
                 "Unable to login via the command line client. Please visit "
                 "%s to login." % fallback_url
             )
-            defer.returnValue(False)
-        defer.returnValue(True)
+            return False
+        return True
 
     def do_emailrequest(self, line):
         """Requests the association of a third party identifier

@@ -78,7 +78,7 @@ class TwistedHttpClient(HttpClient):
             url, data, headers_dict={"Content-Type": ["application/json"]}
         )
         body = yield readBody(response)
-        defer.returnValue((response.code, body))
+        return response.code, body
 
     @defer.inlineCallbacks
     def get_json(self, url, args=None):
@@ -88,7 +88,7 @@ class TwistedHttpClient(HttpClient):
             url = "%s?%s" % (url, qs)
         response = yield self._create_get_request(url)
         body = yield readBody(response)
-        defer.returnValue(json.loads(body))
+        return json.loads(body)
 
     def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
         """Wrapper of _create_request to issue a PUT request"""
@@ -134,7 +134,7 @@ class TwistedHttpClient(HttpClient):
         response = yield self._create_request(method, url)
 
         body = yield readBody(response)
-        defer.returnValue(json.loads(body))
+        return json.loads(body)
 
     @defer.inlineCallbacks
     def _create_request(
@@ -173,7 +173,7 @@ class TwistedHttpClient(HttpClient):
         if self.verbose:
             print("Status %s %s" % (response.code, response.phrase))
             print(pformat(list(response.headers.getAllRawHeaders())))
-        defer.returnValue(response)
+        return response
 
     def sleep(self, seconds):
         d = defer.Deferred()
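The pattern behind all of these changes: on Python 3, a generator may simply `return` a value, so Twisted's legacy `defer.returnValue` escape hatch is no longer needed inside `inlineCallbacks` functions. A minimal sketch:

```python
from twisted.internet import defer

@defer.inlineCallbacks
def fetch_code(client, url):
    response = yield client.request(b"GET", url)
    # Old style: defer.returnValue(response.code)
    # New style: a plain return, which inlineCallbacks handles identically.
    return response.code
```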
@@ -30,3 +30,6 @@ docker-compose up -d
 ### More information
 
 For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
+
+**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see
+https://github.com/element-hq/element-docker-demo**

@@ -51,7 +51,7 @@ services:
       - traefik.http.routers.https-synapse.tls.certResolver=le-ssl
 
   db:
-    image: docker.io/postgres:12-alpine
+    image: docker.io/postgres:15-alpine
     # Change that password, of course!
     environment:
      - POSTGRES_USER=synapse

@@ -8,6 +8,9 @@ All examples and snippets assume that your Synapse service is called `synapse` i
 
 An example Docker Compose file can be found [here](docker-compose.yaml).
 
+**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this
+docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo**
+
 ## Worker Service Examples in Docker Compose
 
 In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
debian/changelog (vendored):

@@ -1,3 +1,87 @@
+matrix-synapse-py3 (1.123.0) stable; urgency=medium
+
+  * New Synapse release 1.123.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Jan 2025 08:37:34 -0700
+
+matrix-synapse-py3 (1.123.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.123.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Jan 2025 14:39:57 +0100
+
+matrix-synapse-py3 (1.122.0) stable; urgency=medium
+
+  * New Synapse release 1.122.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Jan 2025 14:14:14 +0000
+
+matrix-synapse-py3 (1.122.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.122.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Jan 2025 14:06:19 +0000
+
+matrix-synapse-py3 (1.121.1) stable; urgency=medium
+
+  * New Synapse release 1.121.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 11 Dec 2024 18:24:48 +0000
+
+matrix-synapse-py3 (1.121.0) stable; urgency=medium
+
+  * New Synapse release 1.121.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 11 Dec 2024 13:12:30 +0100
+
+matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.121.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 04 Dec 2024 14:47:23 +0000
+
+matrix-synapse-py3 (1.120.2) stable; urgency=medium
+
+  * New synapse release 1.120.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Dec 2024 15:43:37 +0000
+
+matrix-synapse-py3 (1.120.1) stable; urgency=medium
+
+  * New synapse release 1.120.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Dec 2024 09:07:57 +0000
+
+matrix-synapse-py3 (1.120.0) stable; urgency=medium
+
+  * New synapse release 1.120.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Nov 2024 13:10:23 +0000
+
+matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.120.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Nov 2024 15:02:21 +0000
+
+matrix-synapse-py3 (1.119.0) stable; urgency=medium
+
+  * New Synapse release 1.119.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 13 Nov 2024 13:57:51 +0000
+
+matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.119.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 11 Nov 2024 14:33:02 +0000
+
+matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.119.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 06 Nov 2024 08:59:43 -0700
+
 matrix-synapse-py3 (1.118.0) stable; urgency=medium
 
   * New Synapse release 1.118.0.
@@ -20,7 +20,7 @@
 # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
 # in `poetry export` in the past.
 
-ARG PYTHON_VERSION=3.11
+ARG PYTHON_VERSION=3.12
 
 ###
 ### Stage 0: generate requirements.txt
@@ -7,6 +7,7 @@
 #}
 
 ## Server ##
+public_baseurl: http://127.0.0.1:8008/
 report_stats: False
 trusted_key_servers: []
 enable_registration: true
@@ -84,6 +85,14 @@ rc_invites:
   per_user:
     per_second: 1000
     burst_count: 1000
+  per_issuer:
+    per_second: 1000
+    burst_count: 1000
+
+rc_presence:
+  per_user:
+    per_second: 9999
+    burst_count: 9999
 
 federation_rr_transactions_per_room_per_second: 9999
 
@@ -104,6 +113,16 @@ experimental_features:
   msc3967_enabled: true
   # Expose a room summary for public rooms
   msc3266_enabled: true
+  # Send to-device messages to application services
+  msc2409_to_device_messages_enabled: true
+  # Allow application services to masquerade devices
+  msc3202_device_masquerading: true
+  # Sending device list changes, one-time key counts and fallback key usage to application services
+  msc3202_transaction_extensions: true
+  # Proxy OTK claim requests to exclusive ASes
+  msc3983_appservice_otk_claims: true
+  # Proxy key queries to exclusive ASes
+  msc3984_appservice_key_query: true
 
 server_notices:
   system_mxid_localpart: _server
@@ -38,10 +38,13 @@ server {
 {% if using_unix_sockets %}
         proxy_pass http://unix:/run/main_public.sock;
 {% else %}
+        # note: do not add a path (even a single /) after the port in `proxy_pass`,
+        # otherwise nginx will canonicalise the URI and cause signature verification
+        # errors.
         proxy_pass http://localhost:8080;
 {% endif %}
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
-        proxy_set_header Host $host;
+        proxy_set_header Host $host:$server_port;
     }
 }
@@ -54,6 +54,7 @@
   - [Using `synctl` with Workers](synctl_workers.md)
   - [Systemd](systemd-with-workers/README.md)
 - [Administration](usage/administration/README.md)
+  - [Backups](usage/administration/backups.md)
   - [Admin API](usage/administration/admin_api/README.md)
   - [Account Validity](admin_api/account_validity.md)
   - [Background Updates](usage/administration/admin_api/background_updates.md)
@@ -60,10 +60,11 @@ paginate through.
   anything other than the return value of `next_token` from a previous call. Defaults to `0`.
 * `dir`: string - Direction of event report order. Whether to fetch the most recent
   first (`b`) or the oldest first (`f`). Defaults to `b`.
-* `user_id`: string - Is optional and filters to only return users with user IDs that
-  contain this value. This is the user who reported the event and wrote the reason.
-* `room_id`: string - Is optional and filters to only return rooms with room IDs that
-  contain this value.
+* `user_id`: optional string - Filter by the user ID of the reporter. This is the user who reported the event
+  and wrote the reason.
+* `room_id`: optional string - Filter by room id.
+* `event_sender_user_id`: optional string - Filter by the sender of the reported event. This is the user who
+  the report was made against.
 
 **Response**
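A hedged sketch of querying the event reports endpoint with the revised filters; the hostname and token are placeholders, and the path assumes the event reports API's usual `/_synapse/admin/v1/event_reports`:

```python
import requests

resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v1/event_reports",
    # newest first, filtered to a single reporter
    params={"dir": "b", "user_id": "@reporter:example.com"},
    headers={"Authorization": "Bearer <admin access token>"},
)
print(resp.json())
```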
@@ -5,6 +5,7 @@ basis. The currently supported features are:
 - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
   for another client
 - [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
+- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2
 
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
@@ -385,6 +385,13 @@ The API is:
 GET /_synapse/admin/v1/rooms/<room_id>/state
 ```
 
+**Parameters**
+
+The following query parameter is available:
+
+* `type` - The type of room state event to filter by, eg "m.room.create". If provided, only state events
+  of this type will be returned (regardless of their `state_key` value).
+
 A response body like the following is returned:
 
 ```json
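An illustrative request using the new `type` filter; the hostname, room ID and token are placeholders:

```python
import requests

room_id = "!someroom:example.com"
resp = requests.get(
    f"https://synapse.example.com/_synapse/admin/v1/rooms/{room_id}/state",
    params={"type": "m.room.member"},  # only membership state events
    headers={"Authorization": "Bearer <admin access token>"},
)
print(resp.json())
```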
@@ -40,6 +40,7 @@ It returns a JSON body like the following:
     "erased": false,
     "shadow_banned": 0,
     "creation_ts": 1560432506,
+    "last_seen_ts": 1732919539393,
     "appservice_id": null,
     "consent_server_notice_sent": null,
     "consent_version": null,
@@ -55,7 +56,8 @@ It returns a JSON body like the following:
         }
     ],
     "user_type": null,
-    "locked": false
+    "locked": false,
+    "suspended": false
 }
@@ -476,9 +478,9 @@ with a body of:
 }
 ```
 
-## List room memberships of a user
+## List joined rooms of a user
 
-Gets a list of all `room_id` that a specific `user_id` is member.
+Gets a list of all `room_id` that a specific `user_id` is joined to and is a member of (participating in).
 
 The API is:
 
@@ -515,6 +517,73 @@ The following fields are returned in the JSON response body:
 - `joined_rooms` - An array of `room_id`.
 - `total` - Number of rooms.
 
+## Get the number of invites sent by the user
+
+Fetches the number of invites sent by the provided user ID across all rooms
+after the given timestamp.
+
+```
+GET /_synapse/admin/v1/users/$user_id/sent_invite_count
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+* `user_id`: fully qualified: for example, `@user:server.com`
+
+The following should be set as query parameters in the URL:
+
+* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only
+  invites sent at or after the provided timestamp will be returned.
+  This works by comparing the provided timestamp to the `received_ts`
+  column in the `events` table.
+  Note: https://currentmillis.com/ is a useful tool for converting dates
+  into timestamps and vice versa.
+
+A response body like the following is returned:
+
+```json
+{
+  "invite_count": 30
+}
+```
+
+_Added in Synapse 1.122.0_
+
+## Get the cumulative number of rooms a user has joined after a given timestamp
+
+Fetches the number of rooms that the user joined after the given timestamp, even
+if they have subsequently left/been banned from those rooms.
+
+```
+GET /_synapse/admin/v1/users/$user_id/cumulative_joined_room_count
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+* `user_id`: fully qualified: for example, `@user:server.com`
+
+The following should be set as query parameters in the URL:
+
+* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only
+  rooms joined at or after the provided timestamp will be counted.
+  This works by comparing the provided timestamp to the `received_ts`
+  column in the `events` table.
+  Note: https://currentmillis.com/ is a useful tool for converting dates
+  into timestamps and vice versa.
+
+A response body like the following is returned:
+
+```json
+{
+  "cumulative_joined_room_count": 30
+}
+```
+_Added in Synapse 1.122.0_
+
 ## Account Data
 Gets information about account data for a specific `user_id`.
@@ -1443,4 +1512,6 @@ The following fields are returned in the JSON response body:
 - `failed_redactions` - dictionary - the keys of the dict are event ids the process was unable to redact, if any, and the values are
   the corresponding error that caused the redaction to fail
 
-_Added in Synapse 1.116.0._
+_Added in Synapse 1.116.0._
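A hedged example of calling the new `sent_invite_count` endpoint described above; the hostname, user ID and token are placeholders:

```python
import requests

resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v1/users/@user:server.com/sent_invite_count",
    params={"from_ts": 1700000000000},  # ms since the unix epoch
    headers={"Authorization": "Bearer <admin access token>"},
)
print(resp.json()["invite_count"])
```

The `cumulative_joined_room_count` endpoint is queried the same way, with the same `from_ts` parameter.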
docs/changelogs/CHANGES-2023.md (new file): diff suppressed because it is too large.

docs/changelogs/CHANGES-2024.md (new file): diff suppressed because it is too large.
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
 configuration:
 
 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
 ```
 
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -245,7 +245,7 @@ this callback.
 _First introduced in Synapse v1.37.0_
 
 ```python
-async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool
+async def check_username_for_spam(user_profile: synapse.module_api.UserProfile, requester_id: str) -> bool
 ```
 
 Called when computing search results in the user directory. The module must return a
@@ -264,6 +264,8 @@ The profile is represented as a dictionary with the following keys:
 The module is given a copy of the original dictionary, so modifying it from within the
 module cannot modify a user's profile when included in user directory search results.
 
+The `requester_id` parameter is the ID of the user that called the user directory API.
+
 If multiple modules implement this callback, they will be considered in order. If a
 callback returns `False`, Synapse falls through to the next one. The value of the first
 callback that does not return `False` will be used. If this happens, Synapse will not call
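A hedged sketch of a module implementing the updated callback signature; the registration call is the module API's standard `register_spam_checker_callbacks`, while the filtering rule itself is purely illustrative:

```python
class ExampleUsernameChecker:
    def __init__(self, config, api):
        api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam
        )

    async def check_username_for_spam(self, user_profile, requester_id: str) -> bool:
        # Never hide a user from their own directory searches.
        if user_profile["user_id"] == requester_id:
            return False
        # Illustrative rule: flag display names containing "spam".
        return "spam" in (user_profile.get("display_name") or "").lower()
```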
@@ -336,6 +336,36 @@ but it has a `response_types_supported` which excludes "code" (which we rely on,
 is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
 so we have to disable discovery and configure the URIs manually.
 
+### Forgejo
+
+Forgejo is a fork of Gitea that can act as an OAuth2 provider.
+
+The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`.
+
+Synapse config:
+
+```yaml
+oidc_providers:
+  - idp_id: forgejo
+    idp_name: Forgejo
+    discover: false
+    issuer: "https://your-forgejo.com/"
+    client_id: "your-client-id" # TO BE FILLED
+    client_secret: "your-client-secret" # TO BE FILLED
+    client_auth_method: client_secret_post
+    scopes: ["openid", "profile", "email", "groups"]
+    authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize"
+    token_endpoint: "https://your-forgejo.com/login/oauth/access_token"
+    userinfo_endpoint: "https://your-forgejo.com/api/v1/user"
+    user_mapping_provider:
+      config:
+        subject_claim: "sub"
+        picture_claim: "picture"
+        localpart_template: "{{ user.preferred_username }}"
+        display_name_template: "{{ user.name }}"
+        email_template: "{{ user.email }}"
+```
+
 ### GitHub
 
 [GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
@@ -100,6 +100,10 @@ database:
     keepalives_count: 3
 ```
 
+## Backups
+
+Don't forget to [back up](./usage/administration/backups.md#database) your database!
+
 ## Tuning Postgres
 
 The default settings should be fine for most deployments. For larger
@@ -74,7 +74,7 @@ server {
         proxy_pass http://localhost:8008;
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
-        proxy_set_header Host $host;
+        proxy_set_header Host $host:$server_port;
 
         # Nginx by default only allows file uploads up to 1M in size
         # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
@@ -157,7 +157,7 @@ sudo pip install py-bcrypt
 
 #### Alpine Linux
 
-6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with:
+Jahway603 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with:
 
 ```sh
 sudo apk add synapse
@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.8 or later, up to Python 3.11.
+- Python 3.9 or later, up to Python 3.13.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 If building on an uncommon architecture for which pre-built wheels are
@@ -656,6 +656,10 @@ This also requires the optional `lxml` python dependency to be installed. This
 in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
 means `apt-get install libxml2-dev`, or equivalent for your OS.
 
+### Backups
+
+Don't forget to take [backups](../usage/administration/backups.md) of your new server!
+
 ### Troubleshooting Installation
 
 `pip` seems to leak *lots* of memory during installation. For instance, a Linux
@@ -72,8 +72,8 @@ class ExampleSpamChecker:
     async def user_may_publish_room(self, userid, room_id):
         return True  # allow publishing of all rooms
 
-    async def check_username_for_spam(self, user_profile):
-        return False  # allow all usernames
+    async def check_username_for_spam(self, user_profile, requester_id):
+        return False  # allow all usernames regardless of requester
 
     async def check_registration_for_spam(
         self,
@@ -117,6 +117,59 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.122.0

## Dropping support for PostgreSQL 11 and 12

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 11 and 12, as they are no longer supported upstream.
This release of Synapse requires PostgreSQL 13+.

# Upgrading to v1.120.0

## Removal of experimental MSC3886 feature

[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
has been closed (and will not enter the Matrix spec). As such, we are
removing the experimental support for it in this release.

The `experimental_features.msc3886_endpoint` configuration option has
been removed.

## Authenticated media is now enforced by default

The [`enable_authenticated_media`] configuration option now defaults to true.

This means that clients and remote (federated) homeservers now need to use
the authenticated media endpoints in order to download media from your
homeserver.

As an exception, existing media that was stored on the server prior to
this option changing to `true` will still be accessible over the
unauthenticated endpoints.

The matrix.org homeserver has already been running with this option enabled
since September 2024, so most common clients and homeservers should already
be compatible.

With that said, administrators who wish to disable this feature for broader
compatibility can still do so by manually configuring
`enable_authenticated_media: False`.
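A minimal `homeserver.yaml` snippet for that opt-out (the option name is taken from this note; everything else is illustrative):

```yaml
# Restore the pre-1.120 behaviour: serve media over the legacy,
# unauthenticated endpoints as well.
enable_authenticated_media: false
```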
[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media

# Upgrading to v1.119.0

## Minimum supported Python version

The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).

If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.

# Upgrading to v1.111.0

## New worker endpoints for authenticated client and federation media
docs/usage/administration/backups.md (new file, 125 lines)
@@ -0,0 +1,125 @@
# How to back up a Synapse homeserver

It is critical to maintain good backups of your server, to guard against
hardware failure as well as potential corruption due to bugs or administrator
error.

This page documents the things you will need to consider backing up as part of
a Synapse installation.

## Configuration files

Keep a copy of your configuration file (`homeserver.yaml`), as well as any
auxiliary config files it refers to, such as the
[`log_config`](../configuration/config_documentation.md#log_config) file and
[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files).
Often, all such config files will be kept in a single directory such as
`/etc/synapse`, which will make this easier.

## Server signing key

Your server has a [signing
key](../configuration/config_documentation.md#signing_key_path) which it uses
to sign events and outgoing federation requests. It is easiest to back it up
with your configuration files, but an alternative is to have Synapse create a
new signing key if you have to restore.

If you do decide to replace the signing key, you should add the old *public*
key to
[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys).

## Database

Synapse's support for SQLite is only suitable for testing purposes, so for the
purposes of this document, we'll assume you are using
[PostgreSQL](../../postgres.md).

A full discussion of backup strategies for PostgreSQL is out of scope for this
document; see the [PostgreSQL
documentation](https://www.postgresql.org/docs/current/backup.html) for
detailed information.

### Synapse-specific details

* Be very careful not to restore into a database that already has tables
  present. At best, this will error; at worst, it will lead to subtle database
  inconsistencies.

* The `e2e_one_time_keys_json` table should **not** be backed up, or, if it is
  backed up, should be
  [`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html)
  after restoring the database, before Synapse is started; see the sketch
  after this list.

  [Background: restoring the database to an older backup can cause
  used one-time-keys to be re-issued, causing subsequent [message decryption
  errors](https://github.com/element-hq/element-meta/issues/2155). Clearing
  all one-time-keys from the database ensures that this cannot happen, and
  will prompt clients to generate and upload new one-time-keys.]
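As a concrete sketch (assuming a locally-running Postgres and a database named `synapse`, matching the examples below), the table can be cleared after a restore with:

```shell
# Hypothetical one-liner: empty the one-time-keys table before starting Synapse.
sudo -u postgres psql synapse -c 'TRUNCATE e2e_one_time_keys_json;'
```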
### Quick and easy database backup and restore

Typically, the easiest solution is to use `pg_dump` to take a copy of the whole
database. We recommend `pg_dump`'s custom dump format, as it produces
significantly smaller backup files.

```shell
sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump
```

There is no need to stop Postgres or Synapse while `pg_dump` is running: it
will take a consistent snapshot of the database.

To restore, you will need to recreate the database as described in [Using
Postgres](../../postgres.md#set-up-database),
then load the dump into it with `pg_restore`:

```shell
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres pg_restore -d synapse < synapse.dump
```

(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember
to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before
starting Synapse.)

To reiterate: do **not** restore a dump over an existing database.

Again, if you plan to run your homeserver at any sort of production level, we
recommend studying the PostgreSQL documentation on backup options.

## Media store

Synapse keeps a copy of media uploaded by users, including avatars and message
attachments, in its [Media
store](../configuration/config_documentation.md#media-store).

It is a directory on the local disk, containing the following directories (a
backup sketch follows the list):

* `local_content`: this is content uploaded by your local users. As a general
  rule, you should back this up: it may represent the only copy of those
  media files anywhere in the federation, and if they are lost, users will
  see errors when viewing user or room avatars, and messages with attachments.

* `local_thumbnails`: "thumbnails" of images uploaded by your users. If
  [`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails)
  is enabled, these will be regenerated if they are removed from the disk, and
  there is therefore no need to back them up.

  If `dynamic_thumbnails` is *not* enabled (the default): although these can
  theoretically be regenerated from `local_content`, there is no tooling to do
  so. We recommend that these are backed up too.

* `remote_content`: this is a cache of content that was uploaded by a user on
  another server, and has since been requested by a user on your own server.

  Typically there is no need to back up this directory: if a file in this
  directory is removed, Synapse will attempt to fetch it again from the remote
  server.

* `remote_thumbnails`: thumbnails of images uploaded by users on other
  servers. As with `remote_content`, there is normally no need to back this
  up.

* `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded
  by the [URL previews](../../setup/installation.md#url-previews) feature.
  These do not need to be backed up.
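Putting the above together, a media-store backup can copy just the directories that cannot be regenerated. This is an illustrative sketch only; the media store path and backup destination are assumptions, not values from this page:

```shell
# Hypothetical paths: adjust to your media_store_path and backup target.
MEDIA_STORE=/var/lib/synapse/media_store
rsync -a "$MEDIA_STORE/local_content" "$MEDIA_STORE/local_thumbnails" /backups/media_store/
```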
@@ -673,8 +673,9 @@ This setting has the following sub-options:
    TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
    Synapse will refuse to connect unless the server supports STARTTLS.
* `enable_tls`: By default, if the server supports TLS, it will be used, and the server
-   must present a certificate that is valid for 'smtp_host'. If this option
+   must present a certificate that is valid for `tlsname`. If this option
    is set to false, TLS will not be used.
+* `tlsname`: The domain name the SMTP server's TLS certificate must be valid for, defaulting to `smtp_host`.
* `notif_from`: defines the "From" address to use when sending emails.
    It must be set if email sending is enabled. The placeholder '%(app)s' will be replaced by the application name,
    which is normally set in `app_name`, but may be overridden by the

@@ -741,6 +742,7 @@ email:
  force_tls: true
  require_transport_security: true
  enable_tls: false
+  tlsname: mail.server.example.com
  notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
  app_name: my_branded_matrix_server
  enable_notifs: true
@@ -1866,6 +1868,27 @@ rc_federation:
  concurrent: 5
```
---
### `rc_presence`

This option sets ratelimiting for presence.

The `rc_presence.per_user` option sets rate limits on how often a specific
user's presence updates are evaluated. Ratelimited presence updates sent via sync are
ignored, and no error is returned to the client.
This option also sets the rate limit for the
[`PUT /_matrix/client/v3/presence/{userId}/status`](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3presenceuseridstatus)
endpoint.

`per_user` defaults to `per_second: 0.1`, `burst_count: 1`.

Example configuration:
```yaml
rc_presence:
  per_user:
    per_second: 0.05
    burst_count: 0.5
```
---
### `federation_rr_transactions_per_room_per_second`

Sets outgoing federation transaction frequency for sending read-receipts,
@@ -1887,12 +1910,33 @@ Config options related to Synapse's media store.

When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false
-after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
-this will change to true in a future Synapse release.
+after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.
+
+In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this
+case-by-case breakdown describes whether media downloads are permitted:
+
+* `enable_authenticated_media = False`:
+  * unauthenticated client or homeserver requesting local media: allowed
+  * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache,
+    or as long as the remote homeserver does not require authentication to retrieve the media
+* `enable_authenticated_media = True`:
+  * unauthenticated client or homeserver requesting local media:
+    allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
+    otherwise denied.
+  * unauthenticated client or homeserver requesting remote media: the same as for local media;
+    allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
+    otherwise denied.
+
+It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`,
+will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`.
+This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media;
+those older clients or homeservers will not be cut off from media they can already see.
+
+_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.

Example configuration:
```yaml
-enable_authenticated_media: true
+enable_authenticated_media: false
```
---
### `enable_media_repo`
@@ -3070,6 +3114,22 @@ Example configuration:
```yaml
macaroon_secret_key: <PRIVATE STRING>
```
---
### `macaroon_secret_key_path`

An alternative to [`macaroon_secret_key`](#macaroon_secret_key):
allows the secret key to be specified in an external file.

The file should be a plain text file, containing only the secret key.
Synapse reads the secret key from the given file once at startup.

Example configuration:
```yaml
macaroon_secret_key_path: /path/to/secrets/file
```

_Added in Synapse 1.121.0._

---
### `form_secret`
@@ -3108,6 +3168,15 @@ it was last used.
It is possible to build an entry from an old `signing.key` file using the
`export_signing_key` script which is provided with synapse.

If you have lost the private key file, you can ask another server you trust to
tell you the public keys it has seen from your server. To fetch the keys from
`matrix.org`, try something like:

```
curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
    jq '.server_keys | map(.verify_keys) | add'
```

Example configuration:
```yaml
old_signing_keys:
@@ -4371,9 +4440,9 @@ It is possible to scale the processes that handle sending outbound federation re
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `federation_sender_instances` map. Doing so will remove handling of this function from
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.

The way that the load balancing works is any outbound federation request will be assigned
to a federation sender worker based on the hash of the destination server name. This
means that all requests being sent to the same destination will be processed by the same
worker instance. Multiple `federation_sender_instances` are useful if there is a federation

@@ -4417,6 +4486,10 @@ instance_map:
  worker1:
    host: localhost
    port: 8034
+  other:
+    host: localhost
+    port: 8035
+    tls: true
```
Example configuration (#2, for UNIX sockets):
```yaml
@@ -4730,7 +4803,7 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
  for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match
  the "worker_name". If not set or `null`, invites will be accepted on the
  main process.
@@ -273,17 +273,6 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/
    ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/

-   # Account data requests
-   ^/_matrix/client/(r0|v3|unstable)/.*/tags
-   ^/_matrix/client/(r0|v3|unstable)/.*/account_data
-
    # Receipts requests
    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
-
-   # Presence requests
-   ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

    # User directory search requests
    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

@@ -292,6 +281,13 @@ Additionally, the following REST endpoints can be handled for GET requests:
    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
    ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events

+   # Account data requests
+   ^/_matrix/client/(r0|v3|unstable)/.*/tags
+   ^/_matrix/client/(r0|v3|unstable)/.*/account_data
+
+   # Presence requests
+   ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
flake.lock (generated, 56 lines)
@@ -56,24 +56,6 @@
        "type": "github"
      }
    },
-    "flake-utils_2": {
-      "inputs": {
-        "systems": "systems_2"
-      },
-      "locked": {
-        "lastModified": 1681202837,
-        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
    "gitignore": {
      "inputs": {
        "nixpkgs": [

@@ -186,27 +168,27 @@
    },
    "nixpkgs_2": {
      "locked": {
-        "lastModified": 1690535733,
-        "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
+        "lastModified": 1729265718,
+        "narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
+        "rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
-        "ref": "master",
+        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_3": {
      "locked": {
-        "lastModified": 1681358109,
-        "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
+        "lastModified": 1728538411,
+        "narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
+        "rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
        "type": "github"
      },
      "original": {

@@ -249,20 +231,19 @@
      "devenv": "devenv",
      "nixpkgs": "nixpkgs_2",
      "rust-overlay": "rust-overlay",
-      "systems": "systems_3"
+      "systems": "systems_2"
    }
  },
  "rust-overlay": {
    "inputs": {
-      "flake-utils": "flake-utils_2",
      "nixpkgs": "nixpkgs_3"
    },
    "locked": {
-      "lastModified": 1693966243,
-      "narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
+      "lastModified": 1731897198,
+      "narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
      "owner": "oxalica",
      "repo": "rust-overlay",
-      "rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
+      "rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
      "type": "github"
    },
    "original": {

@@ -300,21 +281,6 @@
      "repo": "default",
      "type": "github"
    }
  },
-  "systems_3": {
-    "locked": {
-      "lastModified": 1681028828,
-      "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-      "owner": "nix-systems",
-      "repo": "default",
-      "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-      "type": "github"
-    },
-    "original": {
-      "owner": "nix-systems",
-      "repo": "default",
-      "type": "github"
-    }
-  }
  },
  "root": "root",
flake.nix (26 lines)
@@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
-# You must have already installed Nix (https://nixos.org) on your system to use this.
-# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
-# directly supported, but Nix can be installed inside of WSL2 or even Docker
+# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
+# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
+# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
-# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
+# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#

@@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
-# availabile to you!
+# available to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.

@@ -39,9 +39,9 @@

{
  inputs = {
-    # Use the master/unstable branch of nixpkgs. Used to fetch the latest
+    # Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
    # available versions of packages.
-    nixpkgs.url = "github:NixOS/nixpkgs/master";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.

@@ -50,7 +50,7 @@
    rust-overlay.url = "github:oxalica/rust-overlay";
  };

-  outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
+  outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
    let
      forEachSystem = nixpkgs.lib.genAttrs (import systems);
    in {

@@ -82,7 +82,7 @@
          #
          # NOTE: We currently need to set the Rust version unnecessarily high
          # in order to work around https://github.com/matrix-org/synapse/issues/15939
-          (rust-bin.stable."1.71.1".default.override {
+          (rust-bin.stable."1.82.0".default.override {
            # Additionally install the "rust-src" extension to allow diving into the
            # Rust source code in an IDE (rust-analyzer will also make use of it).
            extensions = [ "rust-src" ];

@@ -126,7 +126,7 @@
        # Automatically activate the poetry virtualenv upon entering the shell.
        languages.python.poetry.activate.enable = true;
        # Install all extra Python dependencies; this is needed to run the unit
-        # tests and utilitise all Synapse features.
+        # tests and utilise all Synapse features.
        languages.python.poetry.install.arguments = ["--extras all"];
        # Install the 'matrix-synapse' package from the local checkout.
        languages.python.poetry.install.installRootPackage = true;

@@ -163,8 +163,8 @@
        # Create a postgres user called 'synapse_user' which has ownership
        # over the 'synapse' database.
        services.postgres.initialScript = ''
          CREATE USER synapse_user;
          ALTER DATABASE synapse OWNER TO synapse_user;
        '';

        # Redis is needed in order to run Synapse in worker mode.

@@ -205,7 +205,7 @@
        # corresponding Nix packages on https://search.nixos.org/packages.
        #
        # This was done until `./install-deps.pl --dryrun` produced no output.
-        env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
+        env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [
          DBI
          ClassMethodModifiers
          CryptEd25519
mypy.ini (2 lines)
@@ -26,7 +26,7 @@ strict_equality = True

# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
-python_version = 3.8
+python_version = 3.9

files =
    docker/,
poetry.lock (generated, 680 lines; diff suppressed because it is too large)
@@ -36,7 +36,7 @@

[tool.ruff]
line-length = 88
-target-version = "py38"
+target-version = "py39"

[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e

@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
-version = "1.118.0"
+version = "1.123.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"

@@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
-python = "^3.8.0"
+python = "^3.9.0"

# Mandatory Dependencies
# ----------------------

@@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
-PyYAML = ">=3.13"
+PyYAML = ">=5.3"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"

@@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
-lxml = { version = ">=4.2.0", optional = true }
+lxml = { version = ">=4.5.2", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }

@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
-ruff = "0.6.9"
+ruff = "0.7.3"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

@@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
+requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"]
build-backend = "poetry.core.masonry.api"

@@ -378,16 +378,19 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
-#  - CPython 3.6 and 3.7: EOLed
-#  - PyPy 3.7: we only support Python 3.8+
+#  - CPython 3.6, 3.7 and 3.8: EOLed
+#  - PyPy 3.7 and 3.8: we only support Python 3.9+
#  - musllinux i686: excluded to reduce number of wheels we build.
#    c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
#  - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
#    c.f. https://github.com/matrix-org/synapse/pull/14259
-skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
+skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

-# We need a rust compiler
-before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
+# We need a rust compiler.
+#
+# We temporarily pin Rust to 1.82.0 to work around
+# https://github.com/element-hq/synapse/issues/17988
+before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }

# For some reason if we don't manually clean the build directory we
@@ -30,14 +30,14 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
-pyo3 = { version = "0.21.0", features = [
+pyo3 = { version = "0.23.2", features = [
    "macros",
    "anyhow",
    "abi3",
    "abi3-py38",
] }
-pyo3-log = "0.10.0"
-pythonize = "0.21.0"
+pyo3-log = "0.12.0"
+pythonize = "0.23.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
@@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType};

/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
-    let child_module = PyModule::new_bound(py, "acl")?;
+    let child_module = PyModule::new(py, "acl")?;
    child_module.add_class::<ServerAclEvaluator>()?;

    m.add_submodule(&child_module)?;

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import acl` work.
-    py.import_bound("sys")?
+    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.acl", child_module)?;
rust/src/events/filter.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

use std::collections::HashMap;

use pyo3::{exceptions::PyValueError, pyfunction, PyResult};

use crate::{
    identifier::UserID,
    matrix_const::{
        HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN,
    },
};

#[pyfunction(name = "event_visible_to_server")]
pub fn event_visible_to_server_py(
    sender: String,
    target_server_name: String,
    history_visibility: String,
    erased_senders: HashMap<String, bool>,
    partial_state_invisible: bool,
    memberships: Vec<(String, String)>, // (state_key, membership)
) -> PyResult<bool> {
    event_visible_to_server(
        sender,
        target_server_name,
        history_visibility,
        erased_senders,
        partial_state_invisible,
        memberships,
    )
    .map_err(|e| PyValueError::new_err(format!("{e}")))
}

/// Return whether the target server is allowed to see the event.
///
/// For a fully stated room, the target server is allowed to see an event E if:
///   - the state at E has world readable or shared history vis, OR
///   - the state at E says that the target server is in the room.
///
/// For a partially stated room, the target server is allowed to see E if:
///   - E was created by this homeserver, AND:
///     - the partial state at E has world readable or shared history vis, OR
///     - the partial state at E says that the target server is in the room.
pub fn event_visible_to_server(
    sender: String,
    target_server_name: String,
    history_visibility: String,
    erased_senders: HashMap<String, bool>,
    partial_state_invisible: bool,
    memberships: Vec<(String, String)>, // (state_key, membership)
) -> anyhow::Result<bool> {
    if let Some(&erased) = erased_senders.get(&sender) {
        if erased {
            return Ok(false);
        }
    }

    if partial_state_invisible {
        return Ok(false);
    }

    if history_visibility != HISTORY_VISIBILITY_INVITED
        && history_visibility != HISTORY_VISIBILITY_JOINED
    {
        return Ok(true);
    }

    let mut visible = false;
    for (state_key, membership) in memberships {
        let state_key = UserID::try_from(state_key.as_ref())
            .map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?;
        if state_key.server_name() != target_server_name {
            return Err(anyhow::anyhow!(
                "state_key.server_name ({}) does not match target_server_name ({target_server_name})",
                state_key.server_name()
            ));
        }

        match membership.as_str() {
            MEMBERSHIP_INVITE => {
                if history_visibility == HISTORY_VISIBILITY_INVITED {
                    visible = true;
                    break;
                }
            }
            MEMBERSHIP_JOIN => {
                visible = true;
                break;
            }
            _ => continue,
        }
    }

    Ok(visible)
}
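A short sketch of how a caller might exercise the pure-Rust entry point. This is illustrative only and not part of the diff; the user IDs and server names are invented:

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    // @bob:example.org is joined and history visibility is "joined",
    // so example.org should be allowed to see @alice:example.com's event.
    #[test]
    fn joined_member_makes_event_visible() {
        let visible = event_visible_to_server(
            "@alice:example.com".to_string(), // sender
            "example.org".to_string(),        // target server
            "joined".to_string(),             // history visibility at the event
            HashMap::new(),                   // no erased senders
            false,                            // room is fully stated
            vec![("@bob:example.org".to_string(), "join".to_string())],
        )
        .expect("all memberships belong to the target server");
        assert!(visible);
    }
}
```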
@@ -41,9 +41,11 @@ use pyo3::{
    pybacked::PyBackedStr,
    pyclass, pymethods,
    types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
-    Bound, IntoPy, PyAny, PyObject, PyResult, Python,
+    Bound, IntoPyObject, PyAny, PyObject, PyResult, Python,
};

+use crate::UnwrapInfallible;
+
/// Definitions of the various fields of the internal metadata.
#[derive(Clone)]
enum EventInternalMetadataData {

@@ -60,31 +62,59 @@ enum EventInternalMetadataData {

impl EventInternalMetadataData {
    /// Convert the field to its name and python object.
-    fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
+    fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) {
        match self {
-            EventInternalMetadataData::OutOfBandMembership(o) => {
-                (pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
-            }
-            EventInternalMetadataData::SendOnBehalfOf(o) => {
-                (pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py))
-            }
-            EventInternalMetadataData::RecheckRedaction(o) => {
-                (pyo3::intern!(py, "recheck_redaction"), o.into_py(py))
-            }
-            EventInternalMetadataData::SoftFailed(o) => {
-                (pyo3::intern!(py, "soft_failed"), o.into_py(py))
-            }
-            EventInternalMetadataData::ProactivelySend(o) => {
-                (pyo3::intern!(py, "proactively_send"), o.into_py(py))
-            }
-            EventInternalMetadataData::Redacted(o) => {
-                (pyo3::intern!(py, "redacted"), o.into_py(py))
-            }
-            EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)),
-            EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)),
-            EventInternalMetadataData::DeviceId(o) => {
-                (pyo3::intern!(py, "device_id"), o.into_py(py))
-            }
+            EventInternalMetadataData::OutOfBandMembership(o) => (
+                pyo3::intern!(py, "out_of_band_membership"),
+                o.into_pyobject(py)
+                    .unwrap_infallible()
+                    .to_owned()
+                    .into_any(),
+            ),
+            EventInternalMetadataData::SendOnBehalfOf(o) => (
+                pyo3::intern!(py, "send_on_behalf_of"),
+                o.into_pyobject(py).unwrap_infallible().into_any(),
+            ),
+            EventInternalMetadataData::RecheckRedaction(o) => (
+                pyo3::intern!(py, "recheck_redaction"),
+                o.into_pyobject(py)
+                    .unwrap_infallible()
+                    .to_owned()
+                    .into_any(),
+            ),
+            EventInternalMetadataData::SoftFailed(o) => (
+                pyo3::intern!(py, "soft_failed"),
+                o.into_pyobject(py)
+                    .unwrap_infallible()
+                    .to_owned()
+                    .into_any(),
+            ),
+            EventInternalMetadataData::ProactivelySend(o) => (
+                pyo3::intern!(py, "proactively_send"),
+                o.into_pyobject(py)
+                    .unwrap_infallible()
+                    .to_owned()
+                    .into_any(),
+            ),
+            EventInternalMetadataData::Redacted(o) => (
+                pyo3::intern!(py, "redacted"),
+                o.into_pyobject(py)
+                    .unwrap_infallible()
+                    .to_owned()
+                    .into_any(),
+            ),
+            EventInternalMetadataData::TxnId(o) => (
+                pyo3::intern!(py, "txn_id"),
+                o.into_pyobject(py).unwrap_infallible().into_any(),
+            ),
+            EventInternalMetadataData::TokenId(o) => (
+                pyo3::intern!(py, "token_id"),
+                o.into_pyobject(py).unwrap_infallible().into_any(),
+            ),
+            EventInternalMetadataData::DeviceId(o) => (
+                pyo3::intern!(py, "device_id"),
+                o.into_pyobject(py).unwrap_infallible().into_any(),
+            ),
        }
    }

@@ -247,7 +277,7 @@ impl EventInternalMetadata {
    ///
    /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
    fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
-        let dict = PyDict::new_bound(py);
+        let dict = PyDict::new(py);

        for entry in &self.data {
            let (key, value) = entry.to_python_pair(py);
@@ -22,21 +22,23 @@

use pyo3::{
    types::{PyAnyMethods, PyModule, PyModuleMethods},
-    Bound, PyResult, Python,
+    wrap_pyfunction, Bound, PyResult, Python,
};

+pub mod filter;
mod internal_metadata;

/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
-    let child_module = PyModule::new_bound(py, "events")?;
+    let child_module = PyModule::new(py, "events")?;
    child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
+    child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?;

    m.add_submodule(&child_module)?;

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import events` work.
-    py.import_bound("sys")?
+    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.events", child_module)?;
@@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request
    let headers_iter = request
        .getattr("requestHeaders")?
        .call_method0("getAllRawHeaders")?
-        .iter()?;
+        .try_iter()?;

    for header in headers_iter {
        let header = header?;
rust/src/identifier.rs (new file, 252 lines)
@@ -0,0 +1,252 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

//! # Matrix Identifiers
//!
//! This module contains definitions and utilities for working with matrix identifiers.

use std::{fmt, ops::Deref};

/// Errors that can occur when parsing a matrix identifier.
#[derive(Clone, Debug, PartialEq)]
pub enum IdentifierError {
    IncorrectSigil,
    MissingColon,
}

impl fmt::Display for IdentifierError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

/// A Matrix user_id.
#[derive(Clone, Debug, PartialEq)]
pub struct UserID(String);

impl UserID {
    /// Returns the `localpart` of the user_id.
    pub fn localpart(&self) -> &str {
        &self[1..self.colon_pos()]
    }

    /// Returns the `server_name` / `domain` of the user_id.
    pub fn server_name(&self) -> &str {
        &self[self.colon_pos() + 1..]
    }

    /// Returns the position of the ':' inside of the user_id.
    /// Used when splitting the user_id into its respective parts.
    fn colon_pos(&self) -> usize {
        self.find(':').unwrap()
    }
}

impl TryFrom<&str> for UserID {
    type Error = IdentifierError;

    /// Will try creating a `UserID` from the provided `&str`.
    /// Can fail if the user_id is incorrectly formatted.
    fn try_from(s: &str) -> Result<Self, Self::Error> {
        if !s.starts_with('@') {
            return Err(IdentifierError::IncorrectSigil);
        }

        if s.find(':').is_none() {
            return Err(IdentifierError::MissingColon);
        }

        Ok(UserID(s.to_string()))
    }
}

impl TryFrom<String> for UserID {
    type Error = IdentifierError;

    /// Will try creating a `UserID` from the provided `String`.
    /// Can fail if the user_id is incorrectly formatted.
    fn try_from(s: String) -> Result<Self, Self::Error> {
        if !s.starts_with('@') {
            return Err(IdentifierError::IncorrectSigil);
        }

        if s.find(':').is_none() {
            return Err(IdentifierError::MissingColon);
        }

        Ok(UserID(s))
    }
}

impl<'de> serde::Deserialize<'de> for UserID {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s: String = serde::Deserialize::deserialize(deserializer)?;
        UserID::try_from(s).map_err(serde::de::Error::custom)
    }
}

impl Deref for UserID {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl fmt::Display for UserID {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
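A short sketch of how these accessors behave, following directly from the `localpart`/`server_name` definitions above (illustrative only; not part of the file):

```rust
#[cfg(test)]
mod user_id_tests {
    use super::*;

    #[test]
    fn parses_and_splits_user_ids() {
        let user = UserID::try_from("@alice:example.com").expect("valid user_id");
        assert_eq!(user.localpart(), "alice");
        assert_eq!(user.server_name(), "example.com");

        // Parsing enforces the sigil and the localpart/server_name separator.
        assert_eq!(
            UserID::try_from("alice:example.com"),
            Err(IdentifierError::IncorrectSigil)
        );
        assert_eq!(UserID::try_from("@alice"), Err(IdentifierError::MissingColon));
    }
}
```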
/// A Matrix room_id.
#[derive(Clone, Debug, PartialEq)]
pub struct RoomID(String);

impl RoomID {
    /// Returns the `localpart` of the room_id.
    pub fn localpart(&self) -> &str {
        &self[1..self.colon_pos()]
    }

    /// Returns the `server_name` / `domain` of the room_id.
    pub fn server_name(&self) -> &str {
        &self[self.colon_pos() + 1..]
    }

    /// Returns the position of the ':' inside of the room_id.
    /// Used when splitting the room_id into its respective parts.
    fn colon_pos(&self) -> usize {
        self.find(':').unwrap()
    }
}

impl TryFrom<&str> for RoomID {
    type Error = IdentifierError;

    /// Will try creating a `RoomID` from the provided `&str`.
    /// Can fail if the room_id is incorrectly formatted.
    fn try_from(s: &str) -> Result<Self, Self::Error> {
        if !s.starts_with('!') {
            return Err(IdentifierError::IncorrectSigil);
        }

        if s.find(':').is_none() {
            return Err(IdentifierError::MissingColon);
        }

        Ok(RoomID(s.to_string()))
    }
}

impl TryFrom<String> for RoomID {
    type Error = IdentifierError;

    /// Will try creating a `RoomID` from the provided `String`.
    /// Can fail if the room_id is incorrectly formatted.
    fn try_from(s: String) -> Result<Self, Self::Error> {
        if !s.starts_with('!') {
            return Err(IdentifierError::IncorrectSigil);
        }

        if s.find(':').is_none() {
            return Err(IdentifierError::MissingColon);
        }

        Ok(RoomID(s))
    }
}

impl<'de> serde::Deserialize<'de> for RoomID {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s: String = serde::Deserialize::deserialize(deserializer)?;
        RoomID::try_from(s).map_err(serde::de::Error::custom)
    }
}

impl Deref for RoomID {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl fmt::Display for RoomID {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// A Matrix event_id.
#[derive(Clone, Debug, PartialEq)]
pub struct EventID(String);

impl TryFrom<&str> for EventID {
    type Error = IdentifierError;

    /// Will try creating an `EventID` from the provided `&str`.
    /// Can fail if the event_id is incorrectly formatted.
    fn try_from(s: &str) -> Result<Self, Self::Error> {
        if !s.starts_with('$') {
            return Err(IdentifierError::IncorrectSigil);
        }

        Ok(EventID(s.to_string()))
    }
}

impl TryFrom<String> for EventID {
    type Error = IdentifierError;

    /// Will try creating an `EventID` from the provided `String`.
    /// Can fail if the event_id is incorrectly formatted.
    fn try_from(s: String) -> Result<Self, Self::Error> {
        if !s.starts_with('$') {
            return Err(IdentifierError::IncorrectSigil);
        }

        Ok(EventID(s))
    }
}

impl<'de> serde::Deserialize<'de> for EventID {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s: String = serde::Deserialize::deserialize(deserializer)?;
        EventID::try_from(s).map_err(serde::de::Error::custom)
    }
}

impl Deref for EventID {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl fmt::Display for EventID {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
@@ -1,3 +1,5 @@
+use std::convert::Infallible;
+
use lazy_static::lazy_static;
use pyo3::prelude::*;
use pyo3_log::ResetHandle;

@@ -6,6 +8,8 @@ pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
+pub mod identifier;
+pub mod matrix_const;
pub mod push;
pub mod rendezvous;

@@ -50,3 +54,16 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {

    Ok(())
}
+
+pub trait UnwrapInfallible<T> {
+    fn unwrap_infallible(self) -> T;
+}
+
+impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
+    fn unwrap_infallible(self) -> T {
+        match self {
+            Ok(val) => val,
+            Err(never) => match never {},
+        }
+    }
+}
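The point of this trait is to unwrap a `Result` whose error type proves it can never fail, without reaching for `unwrap()`. A small illustrative sketch (not part of the diff):

```rust
#[cfg(test)]
mod unwrap_infallible_tests {
    use super::UnwrapInfallible;
    use std::convert::Infallible;

    // The Err variant of Result<T, Infallible> can never be constructed,
    // so this unwrap is statically panic-free, unlike a plain .unwrap().
    #[test]
    fn unwraps_without_panicking() {
        let always_ok: Result<u32, Infallible> = Ok(42);
        assert_eq!(always_ok.unwrap_infallible(), 42);
    }
}
```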
rust/src/matrix_const.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

//! # Matrix Constants
//!
//! This module contains definitions for constant values described by the matrix specification.

pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable";
pub const HISTORY_VISIBILITY_SHARED: &str = "shared";
pub const HISTORY_VISIBILITY_INVITED: &str = "invited";
pub const HISTORY_VISIBILITY_JOINED: &str = "joined";

pub const MEMBERSHIP_BAN: &str = "ban";
pub const MEMBERSHIP_LEAVE: &str = "leave";
pub const MEMBERSHIP_KNOCK: &str = "knock";
pub const MEMBERSHIP_INVITE: &str = "invite";
pub const MEMBERSHIP_JOIN: &str = "join";
@@ -167,6 +167,7 @@ impl PushRuleEvaluator {
    ///
    /// Returns the set of actions, if any, that match (filtering out any
    /// `dont_notify` and `coalesce` actions).
+    #[pyo3(signature = (push_rules, user_id=None, display_name=None))]
    pub fn run(
        &self,
        push_rules: &FilteredPushRules,

@@ -236,6 +237,7 @@ impl PushRuleEvaluator {
    }

    /// Check if the given condition matches.
+    #[pyo3(signature = (condition, user_id=None, display_name=None))]
    fn matches(
        &self,
        condition: Condition,
@@ -65,8 +65,8 @@ use anyhow::{Context, Error};
use log::warn;
use pyo3::exceptions::PyTypeError;
use pyo3::prelude::*;
-use pyo3::types::{PyBool, PyList, PyLong, PyString};
-use pythonize::{depythonize_bound, pythonize};
+use pyo3::types::{PyBool, PyInt, PyList, PyString};
+use pythonize::{depythonize, pythonize, PythonizeError};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;

@@ -79,7 +79,7 @@ pub mod utils;

/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
-    let child_module = PyModule::new_bound(py, "push")?;
+    let child_module = PyModule::new(py, "push")?;
    child_module.add_class::<PushRule>()?;
    child_module.add_class::<PushRules>()?;
    child_module.add_class::<FilteredPushRules>()?;

@@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import push` work.
-    py.import_bound("sys")?
+    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.push", child_module)?;
@@ -182,12 +182,16 @@ pub enum Action {
    Unknown(Value),
}

-impl IntoPy<PyObject> for Action {
-    fn into_py(self, py: Python<'_>) -> PyObject {
+impl<'py> IntoPyObject<'py> for Action {
+    type Target = PyAny;
+    type Output = Bound<'py, Self::Target>;
+    type Error = PythonizeError;
+
+    fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
        // When we pass the `Action` struct to Python we want it to be converted
        // to a dict. We use `pythonize`, which converts the struct using the
        // `serde` serialization.
-        pythonize(py, &self).expect("valid action")
+        pythonize(py, &self)
    }
}

@@ -270,13 +274,13 @@ pub enum SimpleJsonValue {
}

impl<'source> FromPyObject<'source> for SimpleJsonValue {
-    fn extract(ob: &'source PyAny) -> PyResult<Self> {
+    fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
        if let Ok(s) = ob.downcast::<PyString>() {
            Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
        // A bool *is* an int, ensure we try bool first.
        } else if let Ok(b) = ob.downcast::<PyBool>() {
            Ok(SimpleJsonValue::Bool(b.extract()?))
-        } else if let Ok(i) = ob.downcast::<PyLong>() {
+        } else if let Ok(i) = ob.downcast::<PyInt>() {
            Ok(SimpleJsonValue::Int(i.extract()?))
        } else if ob.is_none() {
            Ok(SimpleJsonValue::Null)

@@ -298,15 +302,19 @@ pub enum JsonValue {
}

impl<'source> FromPyObject<'source> for JsonValue {
-    fn extract(ob: &'source PyAny) -> PyResult<Self> {
+    fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
        if let Ok(l) = ob.downcast::<PyList>() {
-            match l.iter().map(SimpleJsonValue::extract).collect() {
+            match l
+                .iter()
+                .map(|it| SimpleJsonValue::extract_bound(&it))
+                .collect()
+            {
                Ok(a) => Ok(JsonValue::Array(a)),
                Err(e) => Err(PyTypeError::new_err(format!(
                    "Can't convert to JsonValue::Array: {e}"
                ))),
            }
-        } else if let Ok(v) = SimpleJsonValue::extract(ob) {
+        } else if let Ok(v) = SimpleJsonValue::extract_bound(ob) {
            Ok(JsonValue::Value(v))
        } else {
            Err(PyTypeError::new_err(format!(

@@ -363,15 +371,19 @@ pub enum KnownCondition {
    },
}

-impl IntoPy<PyObject> for Condition {
-    fn into_py(self, py: Python<'_>) -> PyObject {
-        pythonize(py, &self).expect("valid condition")
+impl<'source> IntoPyObject<'source> for Condition {
+    type Target = PyAny;
+    type Output = Bound<'source, Self::Target>;
+    type Error = PythonizeError;
+
+    fn into_pyobject(self, py: Python<'source>) -> Result<Self::Output, Self::Error> {
+        pythonize(py, &self)
    }
}

impl<'source> FromPyObject<'source> for Condition {
    fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
-        Ok(depythonize_bound(ob.clone())?)
+        Ok(depythonize(ob)?)
    }
}
@@ -23,7 +23,6 @@ use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
-use regex;
use regex::Regex;
use regex::RegexBuilder;
@@ -29,7 +29,7 @@ use pyo3::{
|
||||
exceptions::PyValueError,
|
||||
pyclass, pymethods,
|
||||
types::{PyAnyMethods, PyModule, PyModuleMethods},
|
||||
Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
|
||||
Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
use ulid::Ulid;
|
||||
|
||||
@@ -37,6 +37,7 @@ use self::session::Session;
|
||||
use crate::{
|
||||
errors::{NotFoundError, SynapseError},
|
||||
http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
|
||||
UnwrapInfallible,
|
||||
};
|
||||
|
||||
mod session;
|
||||
@@ -125,7 +126,11 @@ impl RendezvousHandler {
|
||||
let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
|
||||
.map_err(|_| PyValueError::new_err("Invalid base URI"))?;
|
||||
|
||||
let clock = homeserver.call_method0("get_clock")?.to_object(py);
|
||||
let clock = homeserver
|
||||
.call_method0("get_clock")?
|
||||
.into_pyobject(py)
|
||||
.unwrap_infallible()
|
||||
.unbind();
|
||||
|
||||
// Construct a Python object so that we can get a reference to the
|
||||
// evict method and schedule it to run.
|
||||
@@ -288,6 +293,13 @@ impl RendezvousHandler {
         let mut response = Response::new(Bytes::new());
         *response.status_mut() = StatusCode::ACCEPTED;
         prepare_headers(response.headers_mut(), session);

+        // Even though this isn't mandated by the MSC, we set a Content-Type on the response. It
+        // doesn't do any harm as the body is empty, but this helps work around a bug in some reverse
+        // proxy/cache setups which strip the ETag header if there is no Content-Type set.
+        // Specifically, we noticed this behaviour when placing Synapse behind Cloudflare.
+        response.headers_mut().typed_insert(ContentType::text());
+
         http_response_to_twisted(twisted_request, response)?;

         Ok(())
@@ -311,7 +323,7 @@ impl RendezvousHandler {
 }

 pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
-    let child_module = PyModule::new_bound(py, "rendezvous")?;
+    let child_module = PyModule::new(py, "rendezvous")?;

     child_module.add_class::<RendezvousHandler>()?;

@@ -319,7 +331,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>

     // We need to manually add the module to sys.modules to make `from
     // synapse.synapse_rust import rendezvous` work.
-    py.import_bound("sys")?
+    py.import("sys")?
         .getattr("modules")?
         .set_item("synapse.synapse_rust.rendezvous", child_module)?;

@@ -28,9 +28,8 @@ from typing import Collection, Optional, Sequence, Set
 # example)
 DISTS = (
-    "debian:bullseye",  # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
-    "debian:bookworm",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
-    "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
-    "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
+    "debian:bookworm",  # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
+    "debian:sid",  # (rolling distro, no EOL)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:noble",  # 24.04 LTS (EOL 2029-06)
     "ubuntu:oracular",  # 24.10 (EOL 2025-07)

@@ -195,6 +195,10 @@ if [ -z "$skip_docker_build" ]; then
   # Build the unified Complement image (from the worker Synapse image we just built).
   echo_if_github "::group::Build Docker image: complement/Dockerfile"
   $CONTAINER_RUNTIME build -t complement-synapse \
+    `# This is the tag we end up pushing to the registry (see` \
+    `# .github/workflows/push_complement_image.yml) so let's just label it now` \
+    `# so people can reference it by the same name locally.` \
+    -t ghcr.io/element-hq/synapse/complement-synapse \
     -f "docker/complement/Dockerfile" "docker/complement"
   echo_if_github "::endgroup::"

@@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
 # Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
 # if-statement completely.
 py_version = sys.version_info
-if py_version < (3, 8):
-    print("Synapse requires Python 3.8 or above.")
+if py_version < (3, 9):
+    print("Synapse requires Python 3.9 or above.")
     sys.exit(1)

 # Allow using the asyncio reactor via env var.

@@ -88,6 +88,7 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore
 from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
 from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
 from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
+from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
 from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.databases.main.user_directory import (
@@ -255,6 +256,7 @@ class Store(
     ReceiptsBackgroundUpdateStore,
     RelationsWorkerStore,
     EventFederationWorkerStore,
+    SlidingSyncStore,
 ):
     def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
         return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

@@ -174,6 +174,12 @@ class MSC3861DelegatedAuth(BaseAuth):
             logger.warning("Failed to load metadata:", exc_info=True)
             return None

+    async def auth_metadata(self) -> Dict[str, Any]:
+        """
+        Returns the auth metadata dict
+        """
+        return await self._issuer_metadata.get()
+
     async def _introspection_endpoint(self) -> str:
         """
         Returns the introspection endpoint of the issuer

@@ -231,6 +231,8 @@ class EventContentFields:
     ROOM_NAME: Final = "name"

     MEMBERSHIP: Final = "membership"
+    MEMBERSHIP_DISPLAYNAME: Final = "displayname"
+    MEMBERSHIP_AVATAR_URL: Final = "avatar_url"

     # Used in m.room.guest_access events.
     GUEST_ACCESS: Final = "guest_access"
@@ -318,3 +320,8 @@ class ApprovalNoticeMedium:
 class Direction(enum.Enum):
     BACKWARDS = "b"
     FORWARDS = "f"
+
+
+class ProfileFields:
+    DISPLAYNAME: Final = "displayname"
+    AVATAR_URL: Final = "avatar_url"

@@ -87,8 +87,7 @@ class Codes(str, Enum):
     WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
-    # USER_LOCKED = "M_USER_LOCKED"
-    USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"
+    USER_LOCKED = "M_USER_LOCKED"
     NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED"
     CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA"

@@ -101,8 +100,9 @@ class Codes(str, Enum):
     # The account has been suspended on the server.
     # By opposition to `USER_DEACTIVATED`, this is a reversible measure
     # that can possibly be appealed and reverted.
-    # Part of MSC3823.
-    USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+    # Introduced by MSC3823
+    # https://github.com/matrix-org/matrix-spec-proposals/pull/3823
+    USER_ACCOUNT_SUSPENDED = "M_USER_SUSPENDED"

     BAD_ALIAS = "M_BAD_ALIAS"
     # For restricted join rules.
@@ -132,6 +132,10 @@ class Codes(str, Enum):
     # connection.
     UNKNOWN_POS = "M_UNKNOWN_POS"

+    # Part of MSC4133
+    PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
+    KEY_TOO_LARGE = "M_KEY_TOO_LARGE"
+

 class CodeMessageException(RuntimeError):
     """An exception with integer code, a message string attributes and optional headers.

@@ -275,6 +275,7 @@ class Ratelimiter:
         update: bool = True,
         n_actions: int = 1,
         _time_now_s: Optional[float] = None,
+        pause: Optional[float] = 0.5,
     ) -> None:
         """Checks if an action can be performed. If not, raises a LimitExceededError

@@ -298,6 +299,8 @@ class Ratelimiter:
                 at all.
             _time_now_s: The current time. Optional, defaults to the current time according
                 to self.clock. Only used by tests.
+            pause: Time in seconds to pause when an action is being limited. Defaults to 0.5
+                to stop clients from "tight-looping" on retrying their request.

         Raises:
             LimitExceededError: If an action could not be performed, along with the time in
@@ -316,9 +319,8 @@ class Ratelimiter:
         )

         if not allowed:
-            # We pause for a bit here to stop clients from "tight-looping" on
-            # retrying their request.
-            await self.clock.sleep(0.5)
+            if pause:
+                await self.clock.sleep(pause)

             raise LimitExceededError(
                 limiter_name=self._limiter_name,

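A quick sketch of what the new `pause` parameter permits at call sites. This is a hypothetical caller, not part of the diff; `limiter` and `requester` are stand-ins, and only the `pause` keyword comes from the hunk above:

```python
# Default behaviour: sleep 0.5s before raising LimitExceededError,
# discouraging clients from tight-looping on retries.
await limiter.ratelimit(requester)

# Opt out of the anti-tight-loop pause, e.g. for an internal caller that
# already backs off on its own. pause=None (or 0) skips the sleep, since
# the code above only sleeps `if pause:`.
await limiter.ratelimit(requester, pause=None)
```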
@@ -23,7 +23,8 @@

 import hmac
 from hashlib import sha256
-from urllib.parse import urlencode
+from typing import Optional
+from urllib.parse import urlencode, urljoin

 from synapse.config import ConfigError
 from synapse.config.homeserver import HomeServerConfig
@@ -66,3 +67,42 @@ class ConsentURIBuilder:
             urlencode({"u": user_id, "h": mac}),
         )
         return consent_uri
+
+
+class LoginSSORedirectURIBuilder:
+    def __init__(self, hs_config: HomeServerConfig):
+        self._public_baseurl = hs_config.server.public_baseurl
+
+    def build_login_sso_redirect_uri(
+        self, *, idp_id: Optional[str], client_redirect_url: str
+    ) -> str:
+        """Build a `/login/sso/redirect` URI for the given identity provider.
+
+        Builds `/_matrix/client/v3/login/sso/redirect/{idpId}?redirectUrl=xxx` when `idp_id` is specified.
+        Otherwise, builds `/_matrix/client/v3/login/sso/redirect?redirectUrl=xxx` when `idp_id` is `None`.
+
+        Args:
+            idp_id: Optional ID of the identity provider
+            client_redirect_url: URL to redirect the user to after login
+
+        Returns:
+            The URI to follow when choosing a specific identity provider.
+        """
+        base_url = urljoin(
+            self._public_baseurl,
+            f"{CLIENT_API_PREFIX}/v3/login/sso/redirect",
+        )
+
+        serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url})
+
+        if idp_id:
+            resultant_url = urljoin(
+                # We have to add a trailing slash to the base URL to ensure that the
+                # last path segment is not stripped away when joining with another path.
+                f"{base_url}/",
+                f"{idp_id}?{serialized_query_parameters}",
+            )
+        else:
+            resultant_url = f"{base_url}?{serialized_query_parameters}"
+
+        return resultant_url

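The trailing-slash comment above is load-bearing, and easy to verify with the standard library alone. A standalone illustration (the hostname is a placeholder, not from the diff):

```python
from urllib.parse import urljoin

# Without a trailing slash, urljoin treats "redirect" as a file-like last
# segment and replaces it with the joined path:
print(urljoin("https://hs.example/_matrix/client/v3/login/sso/redirect", "my-idp"))
# -> https://hs.example/_matrix/client/v3/login/sso/my-idp

# With the trailing slash, the identity provider ID is appended instead:
print(urljoin("https://hs.example/_matrix/client/v3/login/sso/redirect/", "my-idp"))
# -> https://hs.example/_matrix/client/v3/login/sso/redirect/my-idp
```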
@@ -87,6 +87,7 @@ class ApplicationService:
         ip_range_whitelist: Optional[IPSet] = None,
         supports_ephemeral: bool = False,
         msc3202_transaction_extensions: bool = False,
+        msc4190_device_management: bool = False,
     ):
         self.token = token
         self.url = (
@@ -100,6 +101,7 @@ class ApplicationService:
         self.ip_range_whitelist = ip_range_whitelist
         self.supports_ephemeral = supports_ephemeral
         self.msc3202_transaction_extensions = msc3202_transaction_extensions
+        self.msc4190_device_management = msc4190_device_management

         if "|" in self.id:
             raise Exception("application service ID cannot contain '|' character")

@@ -221,9 +221,13 @@ class Config:
             The number of milliseconds in the duration.

         Raises:
-            TypeError, if given something other than an integer or a string
+            TypeError: if given something other than an integer or a string, or the
+                duration is using an incorrect suffix.
+            ValueError: if given a string not of the form described above.
         """
+        # For integers, we prefer to use `type(value) is int` instead of
+        # `isinstance(value, int)` because we want to exclude subclasses of int, such as
+        # bool.
         if type(value) is int:  # noqa: E721
             return value
         elif isinstance(value, str):
@@ -246,9 +250,20 @@ class Config:
             if suffix in sizes:
                 value = value[:-1]
                 size = sizes[suffix]
+            elif suffix.isdigit():
+                # No suffix is treated as milliseconds.
+                value = value
+                size = 1
+            else:
+                raise TypeError(
+                    f"Bad duration suffix {value} (expected no suffix or one of these suffixes: {sizes.keys()})"
+                )

             return int(value) * size
         else:
-            raise TypeError(f"Bad duration {value!r}")
+            raise TypeError(
+                f"Bad duration type {value!r} (expected int or string duration)"
+            )

     @staticmethod
     def abspath(file_path: str) -> str:

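A standalone sketch of the duration grammar this parser accepts. The `sizes` suffix table is not shown in the hunk, so the values below are an assumption based on Synapse's documented `s`/`m`/`h`/`d`/`w`/`y` config suffixes:

```python
# Assumed suffix table; the real `sizes` dict lives outside the hunk above.
sizes = {
    "s": 1000,
    "m": 60 * 1000,
    "h": 60 * 60 * 1000,
    "d": 24 * 60 * 60 * 1000,
    "w": 7 * 24 * 60 * 60 * 1000,
    "y": 365 * 24 * 60 * 60 * 1000,
}

def parse_duration(value):
    """Sketch of the logic in the hunk above: returns milliseconds."""
    if type(value) is int:  # bare ints are already ms (bool is excluded)
        return value
    elif isinstance(value, str):
        suffix = value[-1]
        if suffix in sizes:
            return int(value[:-1]) * sizes[suffix]
        elif suffix.isdigit():
            return int(value)  # no suffix: treated as milliseconds
        else:
            raise TypeError(f"Bad duration suffix {value}")
    else:
        raise TypeError(f"Bad duration type {value!r}")

assert parse_duration("2h") == 2 * 60 * 60 * 1000
assert parse_duration("500") == 500
assert parse_duration(250) == 250
```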
@@ -183,6 +183,18 @@ def _load_appservice(
             "The `org.matrix.msc3202` option should be true or false if specified."
         )

+    # Opt-in flag for the MSC4190 behaviours.
+    # When enabled, the following C-S API endpoints change for appservices:
+    #   - POST /register does not return an access token
+    #   - PUT /devices/{device_id} creates a new device if one does not exist
+    #   - DELETE /devices/{device_id} no longer requires UIA
+    #   - POST /delete_devices/{device_id} no longer requires UIA
+    msc4190_enabled = as_info.get("io.element.msc4190", False)
+    if not isinstance(msc4190_enabled, bool):
+        raise ValueError(
+            "The `io.element.msc4190` option should be true or false if specified."
+        )
+
     return ApplicationService(
         token=as_info["as_token"],
         url=as_info["url"],
@@ -195,4 +207,5 @@ def _load_appservice(
         ip_range_whitelist=ip_range_whitelist,
         supports_ephemeral=supports_ephemeral,
         msc3202_transaction_extensions=msc3202_transaction_extensions,
+        msc4190_device_management=msc4190_enabled,
     )

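For illustration, a hypothetical registration mapping as `_load_appservice` would see it. All field values are placeholders; only the `io.element.msc4190` key comes from the hunk above:

```python
# Hypothetical `as_info` parsed from an appservice registration file.
as_info = {
    "id": "bridge",
    "as_token": "<as token>",
    "hs_token": "<hs token>",
    "sender_localpart": "bridgebot",
    "url": "http://localhost:8009",
    "namespaces": {"users": [{"regex": "@bridge_.*", "exclusive": True}]},
    # The opt-in flag parsed above; omit it (or set it to False) to keep
    # the pre-MSC4190 behaviour.
    "io.element.msc4190": True,
}
```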
@@ -20,7 +20,7 @@
 #
 #

-from typing import Any, List
+from typing import Any, List, Optional

 from synapse.config.sso import SsoAttributeRequirement
 from synapse.types import JsonDict
@@ -46,7 +46,9 @@ class CasConfig(Config):

         # TODO Update this to a _synapse URL.
         public_baseurl = self.root.server.public_baseurl
-        self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
+        self.cas_service_url: Optional[str] = (
+            public_baseurl + "_matrix/client/r0/login/cas/ticket"
+        )

         self.cas_protocol_version = cas_config.get("protocol_version")
         if (

@@ -110,6 +110,7 @@ class EmailConfig(Config):
             raise ConfigError(
                 "email.require_transport_security requires email.enable_tls to be true"
             )
+        self.email_tlsname = email_config.get("tlsname", None)

         if "app_name" in email_config:
             self.email_app_name = email_config["app_name"]

@@ -365,11 +365,6 @@ class ExperimentalConfig(Config):
         # MSC3874: Filtering /messages with rel_types / not_rel_types.
         self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)

-        # MSC3886: Simple client rendezvous capability
-        self.msc3886_endpoint: Optional[str] = experimental.get(
-            "msc3886_endpoint", None
-        )
-
         # MSC3890: Remotely silence local notifications
         # Note: This option requires "experimental_features.msc3391_enabled" to be
         # set to "true", in order to communicate account data deletions to clients.
@@ -441,12 +436,14 @@ class ExperimentalConfig(Config):
             ("experimental", "msc4108_delegation_endpoint"),
         )

         self.msc3823_account_suspension = experimental.get(
             "msc3823_account_suspension", False
         )

-        # MSC4151: Report room API (Client-Server API)
-        self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
+        # MSC4133: Custom profile fields
+        self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False)

         # MSC4210: Remove legacy mentions
         self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)

+        # MSC4222: Adding `state_after` to sync v2
+        self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
+
         # MSC4076: Add `disable_badge_count`` to pusher configuration
         self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)

@@ -43,7 +43,7 @@ from unpaddedbase64 import decode_base64
 from synapse.types import JsonDict
 from synapse.util.stringutils import random_string, random_string_with_symbols

-from ._base import Config, ConfigError
+from ._base import Config, ConfigError, read_file

 if TYPE_CHECKING:
     from signedjson.key import VerifyKeyWithExpiry
@@ -91,6 +91,11 @@ To suppress this warning and continue using 'matrix.org', admins should set
 'suppress_key_server_warning' to 'true' in homeserver.yaml.
 --------------------------------------------------------------------------------"""

+CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR = """\
+Conflicting options 'macaroon_secret_key' and 'macaroon_secret_key_path' are
+both defined in config file.
+"""
+
 logger = logging.getLogger(__name__)


@@ -166,10 +171,16 @@ class KeyConfig(Config):
             )
         )

-        macaroon_secret_key: Optional[str] = config.get(
-            "macaroon_secret_key", self.root.registration.registration_shared_secret
-        )
-
+        macaroon_secret_key = config.get("macaroon_secret_key")
+        macaroon_secret_key_path = config.get("macaroon_secret_key_path")
+        if macaroon_secret_key_path:
+            if macaroon_secret_key:
+                raise ConfigError(CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR)
+            macaroon_secret_key = read_file(
+                macaroon_secret_key_path, "macaroon_secret_key_path"
+            ).strip()
+        if not macaroon_secret_key:
+            macaroon_secret_key = self.root.registration.registration_shared_secret
         if not macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.

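The resulting precedence is easy to restate as a self-contained sketch. Everything here is a stand-in except the shape of the logic, which mirrors the hunk above:

```python
def resolve_macaroon_secret(config, registration_shared_secret, read_file):
    """Mirror of the precedence introduced above: path > inline key >
    registration shared secret > (in Synapse) derivation from the signing key."""
    key = config.get("macaroon_secret_key")
    path = config.get("macaroon_secret_key_path")
    if path:
        if key:
            raise ValueError("macaroon_secret_key and macaroon_secret_key_path conflict")
        key = read_file(path, "macaroon_secret_key_path").strip()
    if not key:
        key = registration_shared_secret
    return key  # may still be falsy; Synapse then derives one from the signing key
```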
@@ -360,5 +360,6 @@ def setup_logging(
         "Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
     )
     logging.info("Server hostname: %s", config.server.server_name)
+    logging.info("Public Base URL: %s", config.server.public_baseurl)
     logging.info("Instance name: %s", hs.get_instance_name())
     logging.info("Twisted reactor: %s", type(reactor).__name__)

@@ -228,3 +228,9 @@ class RatelimitConfig(Config):
                 config.get("remote_media_download_burst_count", "500M")
             ),
         )

+        self.rc_presence_per_user = RatelimitSettings.parse(
+            config,
+            "rc_presence.per_user",
+            defaults={"per_second": 0.1, "burst_count": 1},
+        )

@@ -22,7 +22,7 @@
 import logging
 import os
 from typing import Any, Dict, List, Tuple
-from urllib.request import getproxies_environment  # type: ignore
+from urllib.request import getproxies_environment

 import attr

@@ -272,9 +272,7 @@ class ContentRepositoryConfig(Config):
             remote_media_lifetime
         )

-        self.enable_authenticated_media = config.get(
-            "enable_authenticated_media", False
-        )
+        self.enable_authenticated_media = config.get("enable_authenticated_media", True)

     def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
         assert data_dir_path is not None

@@ -215,9 +215,6 @@ class HttpListenerConfig:
     additional_resources: Dict[str, dict] = attr.Factory(dict)
     tag: Optional[str] = None
     request_id_header: Optional[str] = None
-    # If true, the listener will return CORS response headers compatible with MSC3886:
-    # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
-    experimental_cors_msc3886: bool = False


 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -335,8 +332,14 @@ class ServerConfig(Config):
             logger.info("Using default public_baseurl %s", public_baseurl)
         else:
             self.serve_client_wellknown = True
         # Ensure that public_baseurl ends with a trailing slash
         if public_baseurl[-1] != "/":
             public_baseurl += "/"

+        # Scrutinize user-provided config
+        if not isinstance(public_baseurl, str):
+            raise ConfigError("Must be a string", ("public_baseurl",))
+
         self.public_baseurl = public_baseurl

         # check that public_baseurl is valid
@@ -1004,7 +1007,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
         additional_resources=listener.get("additional_resources", {}),
         tag=listener.get("tag"),
         request_id_header=listener.get("request_id_header"),
-        experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
     )

     if socket_path:

@@ -566,6 +566,7 @@ def _is_membership_change_allowed(
     logger.debug(
         "_is_membership_change_allowed: %s",
         {
+            "caller_membership": caller.membership if caller else None,
             "caller_in_room": caller_in_room,
             "caller_invited": caller_invited,
             "caller_knocked": caller_knocked,
@@ -677,7 +678,8 @@ def _is_membership_change_allowed(
             and join_rule == JoinRules.KNOCK_RESTRICTED
         )
     ):
-        if not caller_in_room and not caller_invited:
+        # You can only join the room if you are invited or are already in the room.
+        if not (caller_in_room or caller_invited):
             raise AuthError(403, "You are not invited to this room.")
     else:
         # TODO (erikj): may_join list

@@ -42,7 +42,7 @@ import attr
 from typing_extensions import Literal
 from unpaddedbase64 import encode_base64

-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
 from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
 from synapse.synapse_rust.events import EventInternalMetadata
 from synapse.types import JsonDict, StrCollection
@@ -325,12 +325,17 @@ class EventBase(metaclass=abc.ABCMeta):
     def __repr__(self) -> str:
         rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""

+        conditional_membership_string = ""
+        if self.get("type") == EventTypes.Member:
+            conditional_membership_string = f"membership={self.membership}, "
+
         return (
             f"<{self.__class__.__name__} "
             f"{rejection}"
             f"event_id={self.event_id}, "
             f"type={self.get('type')}, "
             f"state_key={self.get('state_key')}, "
             f"{conditional_membership_string}"
             f"outlier={self.internal_metadata.is_outlier()}"
             ">"
         )

@@ -66,50 +66,67 @@ class InviteAutoAccepter:
             event: The incoming event.
         """
         # Check if the event is an invite for a local user.
-        is_invite_for_local_user = (
-            event.type == EventTypes.Member
-            and event.is_state()
-            and event.membership == Membership.INVITE
-            and self._api.is_mine(event.state_key)
-        )
+        if (
+            event.type != EventTypes.Member
+            or event.is_state() is False
+            or event.membership != Membership.INVITE
+            or self._api.is_mine(event.state_key) is False
+        ):
+            return

         # Only accept invites for direct messages if the configuration mandates it.
         is_direct_message = event.content.get("is_direct", False)
-        is_allowed_by_direct_message_rules = (
-            not self._config.accept_invites_only_for_direct_messages
-            or is_direct_message is True
-        )
+        if (
+            self._config.accept_invites_only_for_direct_messages
+            and is_direct_message is False
+        ):
+            return

         # Only accept invites from remote users if the configuration mandates it.
         is_from_local_user = self._api.is_mine(event.sender)
-        is_allowed_by_local_user_rules = (
-            not self._config.accept_invites_only_from_local_users
-            or is_from_local_user is True
-        )
+        if (
+            self._config.accept_invites_only_from_local_users
+            and is_from_local_user is False
+        ):
+            return

+        # Check the user is activated.
+        recipient = await self._api.get_userinfo_by_id(event.state_key)
+
+        # Ignore if the user doesn't exist.
+        if recipient is None:
+            return
+
+        # Never accept invites for deactivated users.
+        if recipient.is_deactivated:
+            return
+
+        # Never accept invites for suspended users.
+        if recipient.suspended:
+            return
+
+        # Never accept invites for locked users.
+        if recipient.locked:
+            return
+
+        # Make the user join the room. We run this as a background process to circumvent a race condition
+        # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
+        run_as_background_process(
+            "retry_make_join",
+            self._retry_make_join,
+            event.state_key,
+            event.state_key,
+            event.room_id,
+            "join",
+            bg_start_span=False,
+        )
-        if (
-            is_invite_for_local_user
-            and is_allowed_by_direct_message_rules
-            and is_allowed_by_local_user_rules
-        ):
-            # Make the user join the room. We run this as a background process to circumvent a race condition
-            # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
-            run_as_background_process(
-                "retry_make_join",
-                self._retry_make_join,
-                event.state_key,
-                event.state_key,
-                event.room_id,
-                "join",
-                bg_start_span=False,
-            )
-            if is_direct_message:
-                # Mark this room as a direct message!
-                await self._mark_room_as_direct_message(
-                    event.state_key, event.sender, event.room_id
-                )
+        if is_direct_message:
+            # Mark this room as a direct message!
+            await self._mark_room_as_direct_message(
+                event.state_key, event.sender, event.room_id
+            )

     async def _mark_room_as_direct_message(
         self, user_id: str, dm_user_id: str, room_id: str
     ) -> None:

@@ -24,7 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 import attr
 from signedjson.types import SigningKey

-from synapse.api.constants import MAX_DEPTH
+from synapse.api.constants import MAX_DEPTH, EventTypes
 from synapse.api.room_versions import (
     KNOWN_EVENT_FORMAT_VERSIONS,
     EventFormatVersions,
@@ -109,6 +109,19 @@ class EventBuilder:
     def is_state(self) -> bool:
         return self._state_key is not None

+    def is_mine_id(self, user_id: str) -> bool:
+        """Determines whether a user ID or room alias originates from this homeserver.
+
+        Returns:
+            `True` if the hostname part of the user ID or room alias matches this
+            homeserver.
+            `False` otherwise, or if the user ID or room alias is malformed.
+        """
+        localpart_hostname = user_id.split(":", 1)
+        if len(localpart_hostname) < 2:
+            return False
+        return localpart_hostname[1] == self._hostname
+
     async def build(
         self,
         prev_event_ids: List[str],

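The parsing in `is_mine_id` is simple enough to check by hand. A standalone illustration, assuming the homeserver's hostname is the placeholder `example.com`:

```python
# What user_id.split(":", 1) yields for a few inputs:
"@alice:example.com".split(":", 1)  # ['@alice', 'example.com'] -> mine
"#room:other.org".split(":", 1)     # ['#room', 'other.org']    -> not mine
"malformed".split(":", 1)           # ['malformed']             -> len < 2, not mine
```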
@@ -142,6 +155,46 @@ class EventBuilder:
             self, state_ids
         )

+        # Check for out-of-band membership that may have been exposed on `/sync` but
+        # the events have not been de-outliered yet so they won't be part of the
+        # room state yet.
+        #
+        # This helps in situations where a remote homeserver invites a local user to
+        # a room that we're already participating in; and we've persisted the invite
+        # as an out-of-band membership (outlier), but it hasn't been pushed to us as
+        # part of a `/send` transaction yet and de-outliered. This also helps for
+        # any of the other out-of-band membership transitions.
+        #
+        # As an optimization, we could check if the room state already includes a
+        # non-`leave` membership event, then we can assume the membership event has
+        # been de-outliered and we don't need to check for an out-of-band
+        # membership. But we don't have the necessary information from a
+        # `StateMap[str]` and we'll just have to take the hit of this extra lookup
+        # for any membership event for now.
+        if self.type == EventTypes.Member and self.is_mine_id(self.state_key):
+            (
+                _membership,
+                member_event_id,
+            ) = await self._store.get_local_current_membership_for_user_in_room(
+                user_id=self.state_key,
+                room_id=self.room_id,
+            )
+            # There is no need to check if the membership is actually an
+            # out-of-band membership (`outlier`) as we would end up with the
+            # same result either way (adding the member event to the
+            # `auth_event_ids`).
+            if (
+                member_event_id is not None
+                # We only need to be careful about duplicating the event in the
+                # `auth_event_ids` list (duplicate `type`/`state_key` is part of the
+                # authorization rules)
+                and member_event_id not in auth_event_ids
+            ):
+                auth_event_ids.append(member_event_id)
+                # Also make sure to point to the previous membership event that will
+                # allow this one to happen so the computed state works out.
+                prev_event_ids.append(member_event_id)
+
         format_version = self.room_version.event_format
         # The types of auth/prev events changes between event versions.
         prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]]

@@ -248,7 +248,7 @@ class EventContext(UnpersistedEventContextBase):
     @tag_args
     async def get_current_state_ids(
         self, state_filter: Optional["StateFilter"] = None
-    ) -> Optional[StateMap[str]]:
+    ) -> StateMap[str]:
         """
         Gets the room state map, including this event - ie, the state in ``state_group``

@@ -256,13 +256,12 @@ class EventContext(UnpersistedEventContextBase):
         not make it into the room state. This method will raise an exception if
         ``rejected`` is set.

+        It is also an error to access this for an outlier event.
+
         Args:
             state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules

         Returns:
-            Returns None if state_group is None, which happens when the associated
-            event is an outlier.
-
             Maps a (type, state_key) to the event ID of the state event matching
             this tuple.
         """
@@ -300,7 +299,8 @@ class EventContext(UnpersistedEventContextBase):
             this tuple.
         """

-        assert self.state_group_before_event is not None
+        if self.state_group_before_event is None:
+            return {}
         return await self._storage.state.get_state_ids_for_group(
             self.state_group_before_event, state_filter
         )

@@ -140,7 +140,6 @@ from typing import (
     Iterable,
     List,
     Optional,
-    Set,
     Tuple,
 )

@@ -170,7 +169,13 @@ from synapse.metrics.background_process_metrics import (
     run_as_background_process,
     wrap_as_background_process,
 )
-from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
+from synapse.types import (
+    JsonDict,
+    ReadReceipt,
+    RoomStreamToken,
+    StrCollection,
+    get_domain_from_id,
+)
 from synapse.util import Clock
 from synapse.util.metrics import Measure
 from synapse.util.retryutils import filter_destinations_by_retry_limiter
@@ -297,12 +302,10 @@ class _DestinationWakeupQueue:
     # being woken up.
     _MAX_TIME_IN_QUEUE = 30.0

-    # The maximum duration in seconds between waking up consecutive destination
-    # queues.
-    _MAX_DELAY = 0.1
-
     sender: "FederationSender" = attr.ib()
     clock: Clock = attr.ib()
+    max_delay_s: int = attr.ib()

     queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
     processing: bool = attr.ib(default=False)

@@ -332,7 +335,7 @@ class _DestinationWakeupQueue:
         # We also add an upper bound to the delay, to gracefully handle the
         # case where the queue only has a few entries in it.
         current_sleep_seconds = min(
-            self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
+            self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue)
         )

         while self.queue:
@@ -416,19 +419,14 @@ class FederationSender(AbstractFederationSender):
         self._is_processing = False
         self._last_poked_id = -1

-        # map from room_id to a set of PerDestinationQueues which we believe are
-        # awaiting a call to flush_read_receipts_for_room. The presence of an entry
-        # here for a given room means that we are rate-limiting RR flushes to that room,
-        # and that there is a pending call to _flush_rrs_for_room in the system.
-        self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
-
-        self._rr_txn_interval_per_room_ms = (
-            1000.0
-            / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
-        )
-
         self._external_cache = hs.get_external_cache()
-        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
+
+        rr_txn_interval_per_room_s = (
+            1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
+        )
+        self._destination_wakeup_queue = _DestinationWakeupQueue(
+            self, self.clock, max_delay_s=rr_txn_interval_per_room_s
+        )

         # Regularly wake up destinations that have outstanding PDUs to be caught up
         self.clock.looping_call_now(
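The rewiring above is worth a quick arithmetic check. The default of 50 for `federation_rr_transactions_per_room_per_second` is Synapse's documented default, assumed here for illustration:

```python
# Assuming the documented default of 50 RR transactions/room/second:
federation_rr_transactions_per_room_per_second = 50

# The old code derived a per-room backoff in milliseconds (1000.0 / 50 = 20ms);
# the new code feeds the same interval, in seconds, into the wakeup queue:
rr_txn_interval_per_room_s = 1.0 / federation_rr_transactions_per_room_per_second
assert rr_txn_interval_per_room_s == 0.02  # 20ms between destination wakeups
```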
@@ -745,37 +743,48 @@ class FederationSender(AbstractFederationSender):

         # Some background on the rate-limiting going on here.
         #
-        # It turns out that if we attempt to send out RRs as soon as we get them from
-        # a client, then we end up trying to do several hundred Hz of federation
-        # transactions. (The number of transactions scales as O(N^2) on the size of a
-        # room, since in a large room we have both more RRs coming in, and more servers
-        # to send them to.)
+        # It turns out that if we attempt to send out RRs as soon as we get them
+        # from a client, then we end up trying to do several hundred Hz of
+        # federation transactions. (The number of transactions scales as O(N^2)
+        # on the size of a room, since in a large room we have both more RRs
+        # coming in, and more servers to send them to.)
         #
-        # This leads to a lot of CPU load, and we end up getting behind. The solution
-        # currently adopted is as follows:
+        # This leads to a lot of CPU load, and we end up getting behind. The
+        # solution currently adopted is to differentiate between receipts and
+        # destinations we should immediately send to, and those we can trickle
+        # the receipts to.
         #
-        # The first receipt in a given room is sent out immediately, at time T0. Any
-        # further receipts are, in theory, batched up for N seconds, where N is calculated
-        # based on the number of servers in the room to achieve a transaction frequency
-        # of around 50Hz. So, for example, if there were 100 servers in the room, then
-        # N would be 100 / 50Hz = 2 seconds.
+        # The current logic is to send receipts out immediately if:
+        #   - the room is "small", i.e. there's only N servers to send receipts
+        #     to, and so sending out the receipts immediately doesn't cause too
+        #     much load; or
+        #   - the receipt is for an event that happened recently, as users
+        #     notice if receipts are delayed when they know other users are
+        #     currently reading the room; or
+        #   - the receipt is being sent to the server that sent the event, so
+        #     that users see receipts for their own receipts quickly.
         #
-        # Then, after T+N, we flush out any receipts that have accumulated, and restart
-        # the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
-        # we stop the cycle and go back to the start.
+        # For destinations that we should delay sending the receipt to, we queue
+        # the receipts up to be sent in the next transaction, but don't trigger
+        # a new transaction to be sent. We then add the destination to the
+        # `DestinationWakeupQueue`, which will slowly iterate over each
+        # destination and trigger a new transaction to be sent.
         #
-        # However, in practice, it is often possible to flush out receipts earlier: in
-        # particular, if we are sending a transaction to a given server anyway (for
-        # example, because we have a PDU or a RR in another room to send), then we may
-        # as well send out all of the pending RRs for that server. So it may be that
-        # by the time we get to T+N, we don't actually have any RRs left to send out.
-        # Nevertheless we continue to buffer up RRs for the room in question until we
-        # reach the point that no RRs arrive between timer ticks.
+        # However, in practice, it is often possible to send out delayed
+        # receipts earlier: in particular, if we are sending a transaction to a
+        # given server anyway (for example, because we have a PDU or a RR in
+        # another room to send), then we may as well send out all of the pending
+        # RRs for that server. So it may be that by the time we get to waking up
+        # the destination, we don't actually have any RRs left to send out.
         #
-        # For even more background, see https://github.com/matrix-org/synapse/issues/4730.
+        # For even more background, see
+        # https://github.com/matrix-org/synapse/issues/4730.

         room_id = receipt.room_id

         # Local read receipts always have 1 event ID.
         event_id = receipt.event_ids[0]

         # Work out which remote servers should be poked and poke them.
         domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
             room_id
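A back-of-the-envelope check of the O(N^2) claim in the comment above, with illustrative numbers only (the proportionality assumption is ours, not the diff's):

```python
# In a room spanning n_servers homeservers, receipts arrive roughly in
# proportion to n_servers, and each receipt fans out to ~n_servers
# destinations, so naive immediate sending does ~n_servers**2 work:
n_servers = 100
receipts_per_interval = n_servers          # rough proportionality assumption
sends = receipts_per_interval * (n_servers - 1)
print(sends)  # 9900 federation sends -- hence the batching/trickling above
```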
@@ -797,49 +806,51 @@ class FederationSender(AbstractFederationSender):
         if not domains:
             return

-        queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
+        # We now split which domains we want to wake up immediately vs which we
+        # want to delay waking up.
+        immediate_domains: StrCollection
+        delay_domains: StrCollection

-        # if there is no flush yet scheduled, we will send out these receipts with
-        # immediate flushes, and schedule the next flush for this room.
-        if queues_pending_flush is not None:
-            logger.debug("Queuing receipt for: %r", domains)
+        if len(domains) < 10:
+            # For "small" rooms send to all domains immediately
+            immediate_domains = domains
+            delay_domains = ()
         else:
-            logger.debug("Sending receipt to: %r", domains)
-            self._schedule_rr_flush_for_room(room_id, len(domains))
+            metadata = await self.store.get_metadata_for_event(
+                receipt.room_id, event_id
+            )
+            assert metadata is not None

-        for domain in domains:
-            queue = self._get_per_destination_queue(domain)
-            queue.queue_read_receipt(receipt)
+            sender_domain = get_domain_from_id(metadata.sender)

-            # if there is already a RR flush pending for this room, then make sure this
-            # destination is registered for the flush
-            if queues_pending_flush is not None:
-                queues_pending_flush.add(queue)
-            else:
-                queue.flush_read_receipts_for_room(room_id)
+            if self.clock.time_msec() - metadata.received_ts < 60_000:
+                # We always send receipts for recent messages immediately
+                immediate_domains = domains
+                delay_domains = ()
+            else:
+                # Otherwise, we delay waking up all destinations except for the
+                # sender's domain.
+                immediate_domains = []
+                delay_domains = []

-    def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
-        # that is going to cause approximately len(domains) transactions, so now back
-        # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
-        backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
+                for domain in domains:
+                    if domain == sender_domain:
+                        immediate_domains.append(domain)
+                    else:
+                        delay_domains.append(domain)

-        logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
-        self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
-        self._queues_awaiting_rr_flush_by_room[room_id] = set()
+        for domain in immediate_domains:
+            # Add to destination queue and wake the destination up
+            queue = self._get_per_destination_queue(domain)
+            queue.queue_read_receipt(receipt)
+            queue.attempt_new_transaction()

-    def _flush_rrs_for_room(self, room_id: str) -> None:
-        queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
-        logger.debug("Flushing RRs in %s to %s", room_id, queues)
-
-        if not queues:
-            # no more RRs arrived for this room; we are done.
-            return
-
-        # schedule the next flush
-        self._schedule_rr_flush_for_room(room_id, len(queues))
-
-        for queue in queues:
-            queue.flush_read_receipts_for_room(room_id)
+        for domain in delay_domains:
+            # Add to destination queue...
+            queue = self._get_per_destination_queue(domain)
+            queue.queue_read_receipt(receipt)
+            # ... and schedule the destination to be woken up.
+            self._destination_wakeup_queue.add_to_queue(domain)

     async def send_presence_to_destinations(
         self, states: Iterable[UserPresenceState], destinations: Iterable[str]

@@ -156,7 +156,6 @@ class PerDestinationQueue:
         # Each receipt can only have a single receipt per
         # (room ID, receipt type, user ID, thread ID) tuple.
         self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
-        self._rrs_pending_flush = False

         # stream_id of last successfully sent to-device message.
         # NB: may be a long or an int.
@@ -258,15 +257,7 @@ class PerDestinationQueue:
             }
         )

-    def flush_read_receipts_for_room(self, room_id: str) -> None:
-        # If there are any pending receipts for this room then force-flush them
-        # in a new transaction.
-        for edu in self._pending_receipt_edus:
-            if room_id in edu:
-                self._rrs_pending_flush = True
-                self.attempt_new_transaction()
-                # No use in checking remaining EDUs if the room was found.
-                break
+        self.mark_new_data()

     def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
         self._pending_edus_keyed[(edu.edu_type, key)] = edu
@@ -603,12 +594,9 @@ class PerDestinationQueue:
             self._destination, last_successful_stream_ordering
         )

-    def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
+    def _get_receipt_edus(self, limit: int) -> Iterable[Edu]:
         if not self._pending_receipt_edus:
             return
-        if not force_flush and not self._rrs_pending_flush:
-            # not yet time for this lot
-            return

         # Send at most limit EDUs for receipts.
         for content in self._pending_receipt_edus[:limit]:
@@ -747,7 +735,7 @@ class _TransactionQueueManager:
         )

         # Add read receipt EDUs.
-        pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
+        pending_edus.extend(self.queue._get_receipt_edus(limit=5))
         edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)

         # Next, prioritize to-device messages so that existing encryption channels
@@ -795,13 +783,6 @@ class _TransactionQueueManager:
         if not self._pdus and not pending_edus:
             return [], []

-        # if we've decided to send a transaction anyway, and we have room, we
-        # may as well send any pending RRs
-        if edu_limit:
-            pending_edus.extend(
-                self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
-            )
-
         if self._pdus:
             self._last_stream_ordering = self._pdus[
                 -1

@@ -509,6 +509,9 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
         event = content["event"]
         invite_room_state = content.get("invite_room_state", [])

+        if not isinstance(invite_room_state, list):
+            invite_room_state = []
+
         # Synapse expects invite_room_state to be in unsigned, as it is in v1
         # API

@@ -124,6 +124,7 @@ class AdminHandler:
             "consent_ts": user_info.consent_ts,
             "user_type": user_info.user_type,
             "is_guest": user_info.is_guest,
+            "suspended": user_info.suspended,
         }

         if self._msc3866_enabled:
@@ -472,7 +473,7 @@ class AdminHandler:
             "type": EventTypes.Redaction,
             "content": {"reason": reason} if reason else {},
             "room_id": room,
-            "sender": user_id,
+            "sender": requester.user.to_string(),
         }
         if room_version.updated_redaction_rules:
             event_dict["content"]["redacts"] = event.event_id

@@ -896,10 +896,10 @@ class ApplicationServicesHandler:
         results = await make_deferred_yieldable(
             defer.DeferredList(
                 [
-                    run_in_background(
+                    run_in_background(  # type: ignore[call-overload]
                         self.appservice_api.claim_client_keys,
                         # We know this must be an app service.
-                        self.store.get_app_service_by_id(service_id),  # type: ignore[arg-type]
+                        self.store.get_app_service_by_id(service_id),
                         service_query,
                     )
                     for service_id, service_query in query_by_appservice.items()
@@ -952,10 +952,10 @@ class ApplicationServicesHandler:
         results = await make_deferred_yieldable(
             defer.DeferredList(
                 [
-                    run_in_background(
+                    run_in_background(  # type: ignore[call-overload]
                         self.appservice_api.query_keys,
                         # We know this must be an app service.
-                        self.store.get_app_service_by_id(service_id),  # type: ignore[arg-type]
+                        self.store.get_app_service_by_id(service_id),
                         service_query,
                     )
                     for service_id, service_query in query_by_appservice.items()

@@ -729,6 +729,40 @@ class DeviceHandler(DeviceWorkerHandler):

         await self.notify_device_update(user_id, device_ids)

+    async def upsert_device(
+        self, user_id: str, device_id: str, display_name: Optional[str] = None
+    ) -> bool:
+        """Create or update a device
+
+        Args:
+            user_id: The user to update devices of.
+            device_id: The device to update.
+            display_name: The new display name for this device.
+
+        Returns:
+            True if the device was created, False if it was updated.
+        """
+
+        # Reject a new displayname which is too long.
+        self._check_device_name_length(display_name)
+
+        created = await self.store.store_device(
+            user_id,
+            device_id,
+            initial_device_display_name=display_name,
+        )
+
+        if not created:
+            await self.store.update_device(
+                user_id,
+                device_id,
+                new_display_name=display_name,
+            )
+
+        await self.notify_device_update(user_id, [device_id])
+        return created
+
     async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
         """Update the given device


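A hypothetical call site for the new helper; the names are illustrative, and the return-value semantics come from the docstring above:

```python
# Hypothetical usage from an MSC4190-style appservice code path:
created = await device_handler.upsert_device(
    user_id="@bridge_bot:example.com",
    device_id="BRIDGEDEV01",
    display_name="Bridge device",
)
if created:
    print("device was newly created")
else:
    print("existing device updated in place")
```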
@@ -39,6 +39,8 @@ from synapse.replication.http.devices import ReplicationUploadKeysForUserRestSe
 from synapse.types import (
     JsonDict,
+    JsonMapping,
+    ScheduledTask,
+    TaskStatus,
     UserID,
     get_domain_from_id,
     get_verify_key_from_cross_signing_key,
@@ -70,6 +72,7 @@ class E2eKeysHandler:
         self.is_mine = hs.is_mine
         self.clock = hs.get_clock()
         self._worker_lock_handler = hs.get_worker_locks_handler()
+        self._task_scheduler = hs.get_task_scheduler()

         federation_registry = hs.get_federation_registry()

@@ -116,6 +119,10 @@ class E2eKeysHandler:
             hs.config.experimental.msc3984_appservice_key_query
         )

+        self._task_scheduler.register_action(
+            self._delete_old_one_time_keys_task, "delete_old_otks"
+        )
+
     @trace
     @cancellable
     async def query_devices(
@@ -615,7 +622,7 @@ class E2eKeysHandler:
         3. Attempt to fetch fallback keys from the database.

         Args:
-            local_query: An iterable of tuples of (user ID, device ID, algorithm).
+            local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
             always_include_fallback_keys: True to always include fallback keys.

         Returns:
@@ -1574,6 +1581,45 @@ class E2eKeysHandler:
             return True
         return False

+    async def _delete_old_one_time_keys_task(
+        self, task: ScheduledTask
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        """Scheduler task to delete old one time keys.
+
+        Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility
+        that it could still have old OTKs that the client has dropped. This task is scheduled exactly once
+        by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm.
+        """
+        last_user = task.result.get("from_user", "") if task.result else ""
+        while True:
+            # We process users in batches of 100
+            users, rowcount = await self.store.delete_old_otks_for_next_user_batch(
+                last_user, 100
+            )
+            if len(users) == 0:
+                # We're done!
+                return TaskStatus.COMPLETE, None, None
+
+            logger.debug(
+                "Deleted %i old one-time-keys for users '%s'..'%s'",
+                rowcount,
+                users[0],
+                users[-1],
+            )
+            last_user = users[-1]
+
+            # Store our progress
+            await self._task_scheduler.update_task(
+                task.id, result={"from_user": last_user}
+            )
+
+            # Sleep a little before doing the next user.
+            #
+            # matrix.org has about 15M users in the e2e_one_time_keys_json table
+            # (comprising 20M devices). We want this to take about a week, so we need
+            # to do about one batch of 100 users every 4 seconds.
+            await self.clock.sleep(4)
+

 def _check_cross_signing_key(
     key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None

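The pacing comment checks out; simple arithmetic on the numbers quoted in the comment above:

```python
# 15M users processed 100 at a time, with a 4s sleep per batch:
batches = 15_000_000 / 100          # 150,000 batches
total_seconds = batches * 4         # 600,000 s
print(total_seconds / 86_400)       # ~6.9 days -- "about a week", as the comment says
```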
Some files were not shown because too many files have changed in this diff.