Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-05 01:10:13 +00:00

Compare commits: dmr/docs-t ... v1.74.0 (275 commits)
Commits (SHA1):
774e20b570 eb2defc2f7 e70f398f4a 822646b636 b8cf480fa9
62ed877433 e2a1adbf5d 3d87847ecc 7982891794 b5b5f66084
74b89c2761 527366f962 b087964875 2a3cd59dd0 a5d8fee097
ceb7be56a6 eb32bc5056 4ea8745724 373c485d8c 3ac412b4e2
94bc21e69f c2de2ca630 a58b550eac c369e95691 9d8a3234ba
da77720752 f3ad68c343 dfe8febe47 60c3fea327 2506dd7641
be3a8a85e3 22e91b8019 96251af50d d69bf3b24c 9a9568168a
cf1059d045 9e82caac45 66d47b44cd bb9f156978 9b6224577e
a16931f30d 5d7c35b4d9 dc6b60f68d cb59e08062 cee9445884
6a8310f3df 501f62d1a6 e1779bc69f 93ac3c197e 05eb55f57d
057cc7850a de6bb61062 7558d294ae 680a8d4e9e 802539159e
e863a99d8d f685318c2a 890e5f610e acea4d7a2f fac8a38525
6acb6d772a 656dce4baf 058789bada d32820c7be 6ac35667af
c61f1ef716 71f3e53ad0 781b14ec69 854a6884d8 6a41e5022e
89ee169556 7aefc7e9fc e8bce8999f 4569eda944 ecb6fe9d9c
c29e2c6306 13aa29db1d 99d1897078 807f077db2 e860316818
8c5b8e6d40 5b0dcda7f0 c7e29ca277 72f3e38137 9ccc09fe9e
dd51828120 3da6450327 8f10c8b054 1183c372fa d56f48038a
d748bbc8f8 f792dd74e1 2dad42a9fb 58383c18bd 7a7ee3d6b8
105ab1c3d2 7d24662fdd 09de2aecb0 39cde585bf c2e06c36d4
f6c74d1cb2 9af2be192a 3b4e150868 f38d7d79c8 4ae967cf63
7f78b383ca df390a8e67 972743051b 6d47b7e325 9b4cb1e2ed
9cae44f49e 7eb7460042 6d7523ef14 1799a54a54 da933bfc3f
ececb2d6cb 7c005b279e 706b6a1ebb a6514792b2 1526ff389f
640cb3c81c 22036f038e 6e0cb8de79 d988fb5e7b 8f77418edd
78867f302f 8718322130 8d133a8464 e1b15f25f3 78e23eea05
ae22e6e94f 01a0527892 e7132c3f81 75888c2b1f 115f0eb233
c15e9a0edb a84744fba0 7f44f3aee3 f0d18772f3 e6b5ca1a9f
618e4ab81b d8cc86eff4 1a8cd8bec0 882277008c d63814fd73
945a0928c7 f844b470f6 5cb6ad3b87 1eed795fc5 258b5285b6
63cc56affa b5ab2c428a 634359b083 64dd8a9c6e 36097e88c4
e226513c0f 4d1de6a944 4a333d638b 2cecb782c4 ae54a94063
6816300588 2cc592584a fb66fae84b 95f7a65a56 683bf4af4b
8e38d74313 b7f5a3aaa6 cc45808ea3 fec1e2cb52 639780fc15
2e7c86c129 334a8324d3 a3623af74e 3a4f80f8c6 13ca8bb2fc
b2c2b03079 d10a85ec9e e9a4343cb2 21447c9102 e9cbddc8e7
0cf48f2d5f 22d46db0ea a5fcdea090 d85cba1aa0 5853d798a1
69814eb282 f0dec49f01 1d1ab0e41f 404404733c 7894251bce
2193513346 42f9d414c2 e980982b59 233fc6e279 bd70fc1a3c
a2a44e53a6 6ac9b5c9a5 7deee6763c b03b5a5a4f 1df4260620
04359f92f2 b2a1e75431 8bcdd712b8 bb39fc4366 79b6c19321
a4b1f64562 e5d18956b9 af592d7d4c b00294b8b1 78909f5028
2e2cffe1a2 b1379a7ca8 86c5a710d8 e5cd278f3f 6546308c1e
19a57f4a37 d4fac8a3e2 59ca73006c 2bd7f3eeab 2b56aaa0b8
1dd16e96c8 a62c796f63 efdcb24328 5905ba12d0 051402d1df
ddbba28d52 9473ebb9e7 b922b54b61 dbfc9b803e cc3a52b33d
15bdb0da52 b2890369cd 278f8543be 00d108fce4 2bb2c32e8e
7911e2835d 730b13dbc9 81815e0561 453914b472 d1efa7b3a4
1335367ca7 44f0d573cf e0d9013adf cc3a04876f 6a6e1e8c07
aa70556699 67583281e3 1357ae869f 4dc05f3019 cbe01ccc3f
40fa8294e3 0d59ae706a 0cfbb35131 04fd6221de 86b7d9b886
8756d5c87e 23fa636ed7 d902181de9 85fcbba595 9192d74b0b
2d0ba3f89a 0f1befd0b1 c9dffd5b33 d125919963 8c8fcdb87d
8c94dd3a27 581b37b5d6 19c0e55ef7 872ea2f4de 1e73effebf
09b588854e 1c642156d7 5f77b74215 4dd7aa371b 7fe3b908a5
.ci/scripts/auditwheel_wrapper.py (new executable file, 132 lines)
@@ -0,0 +1,132 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel; if so, rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an abi3 compatible wheel"""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect a wheel file with only a single tag
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macos and need to use
        # `delocate-listdeps`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
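The subtle part of the script above is the filename handling: `parse_wheel_filename` returns the version and tag set, but normalizes the distribution name (underscores become hyphens), so the wrapper recovers the original name by splitting the basename itself. A minimal sketch of what the rename amounts to, using a hypothetical wheel filename:

```python
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

# Hypothetical cpython-tagged wheel name, for illustration only.
filename = "matrix_synapse-1.74.0-cp37-cp37m-manylinux_2_17_x86_64.whl"

name_norm, version, build, tags = parse_wheel_filename(filename)
print(name_norm)  # "matrix-synapse": the normalized form the script avoids

# Recover the un-normalized name the way the wrapper does.
name = filename.split("-")[0]  # "matrix_synapse"

tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
print(f"{name}-{version}-{abi3_tag}.whl")
# -> matrix_synapse-1.74.0-cp37-abi3-manylinux_2_17_x86_64.whl
```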
.ci/scripts/calculate_jobs.py
@@ -46,7 +46,7 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.8", "3.9", "3.10")
+        for version in ("3.8", "3.9", "3.10", "3.11")
     )


@@ -54,7 +54,7 @@ trial_postgres_tests = [
     {
         "python-version": "3.7",
         "database": "postgres",
-        "postgres-version": "10",
+        "postgres-version": "11",
         "extras": "all",
     }
 ]

@@ -62,9 +62,9 @@ trial_postgres_tests = [
 if not IS_PR:
     trial_postgres_tests.append(
         {
-            "python-version": "3.10",
+            "python-version": "3.11",
             "database": "postgres",
-            "postgres-version": "14",
+            "postgres-version": "15",
            "extras": "all",
         }
     )
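For context, the first hunk above edits a generator that fans a trial-job template out across Python versions. A self-contained sketch of the pattern, with the surrounding names (`trial_sqlite_tests`, the initial job) assumed from the hunk headers rather than shown in the diff:

```python
# Sketch only: the real script builds a GitHub Actions job matrix.
IS_PR = False  # assumed flag, per the "if not IS_PR:" hunk context

trial_sqlite_tests = [
    {"python-version": "3.7", "database": "sqlite", "extras": "all"}
]

if not IS_PR:
    # Full (non-PR) runs add one sqlite job per additional Python version.
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10", "3.11")
    )

print(trial_sqlite_tests)
```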
@@ -21,7 +21,7 @@ endblock

 block Install Complement Dependencies
   sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
-  go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
+  go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
 endblock

 block Install custom gotestfmt template
.editorconfig
@@ -4,7 +4,7 @@
 root = true

 # 4 space indentation
-[*.py]
+[*.{py,pyi}]
 indent_style = space
 indent_size = 4
 max_line_length = 88
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 45 lines changed)
@@ -74,6 +74,36 @@ body:
         - Debian packages from packages.matrix.org
         - pip (from PyPI)
         - Other (please mention below)
+        - I don't know
     validations:
       required: true
+  - type: input
+    id: database
+    attributes:
+      label: Database
+      description: |
+        Are you using SQLite or PostgreSQL? What's the version of your database?
+
+        If PostgreSQL, please also answer the following:
+         - are you using a single PostgreSQL server
+           or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
+         - have you previously ported from SQLite using the Synapse "portdb" script?
+         - have you previously restored from a backup?
+    validations:
+      required: true
+  - type: dropdown
+    id: workers
+    attributes:
+      label: Workers
+      description: |
+        Are you running a single Synapse process, or are you running
+        [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
+      options:
+        - Single process
+        - Multiple workers
+        - I don't know
+    validations:
+      required: true
   - type: textarea
     id: platform
     attributes:
@@ -83,17 +113,28 @@ body:
         e.g. distro, hardware, if it's running in a vm/container, etc.
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Configuration
+      description: |
+        Do you have any unusual config options turned on? If so, please provide details.
+
+        - Experimental or undocumented features
+        - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
+        - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
+        - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: |
         Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
-        This will be automatically formatted into code, so there is no need for backticks.
+        This will be automatically formatted into code, so there is no need for backticks (`\``).

         Please be careful to remove any personal or private data.

-        **Bug reports are usually very difficult to diagnose without logging.**
+        **Bug reports are usually impossible to diagnose without logging.**
     render: shell
     validations:
       required: true
.github/dependabot.yml (vendored, 1 line changed)
@@ -18,5 +18,6 @@ updates:

   - package-ecosystem: "cargo"
     directory: "/"
+    versioning-strategy: "lockfile-only"
     schedule:
       interval: "weekly"
.github/workflows/docs-pr-netlify.yaml (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
name: Deploy documentation PR preview

on:
  workflow_run:
    workflows: [ "Prepare documentation PR preview" ]
    types:
      - completed

jobs:
  netlify:
    if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
    runs-on: ubuntu-latest
    steps:
      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
      - name: 📥 Download artifact
        uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
          name: book
          path: book

      - name: 📤 Deploy to Netlify
        uses: matrix-org/netlify-pr-preview@v1
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
          branch: ${{ github.event.workflow_run.head_branch }}
          revision: ${{ github.event.workflow_run.head_sha }}
          token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          site_id: ${{ secrets.NETLIFY_SITE_ID }}
          desc: Documentation preview
          deployment_env: PR Documentation Preview
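The comment in the download step points at a real gap: the stock actions/download-artifact action could not yet fetch artifacts produced by a different workflow run, which is exactly what a `workflow_run` trigger needs. Third-party actions work around this via the REST API; a rough sketch of the underlying call (token, repository, and run id are placeholders):

```python
import requests

TOKEN = "ghp_example"  # placeholder personal access token
OWNER, REPO, RUN_ID = "matrix-org", "synapse", 1234567890  # placeholders

# List the artifacts attached to the triggering workflow run.
resp = requests.get(
    f"https://api.github.com/repos/{OWNER}/{REPO}/actions/runs/{RUN_ID}/artifacts",
    headers={
        "Authorization": f"Bearer {TOKEN}",
        "Accept": "application/vnd.github+json",
    },
)
resp.raise_for_status()

for artifact in resp.json()["artifacts"]:
    if artifact["name"] == "book":
        # The download URL returns a zip of the uploaded "book" directory.
        print(artifact["archive_download_url"])
```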
.github/workflows/docs-pr.yaml (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
name: Prepare documentation PR preview

on:
  pull_request:
    paths:
      - docs/**

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      - name: Upload Artifact
        uses: actions/upload-artifact@v3
        with:
          name: book
          path: book
          # We'll only use this in a workflow_run, then we're done with it
          retention-days: 1
.github/workflows/latest_deps.yml (vendored, 17 lines changed)
@@ -27,10 +27,9 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       # The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -62,10 +61,9 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       - run: sudo apt-get -qq install xmlsec1
@@ -136,10 +134,9 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       - name: Ensure sytest runs `pip install`
@@ -211,7 +208,7 @@ jobs:

     steps:
       - uses: actions/checkout@v3
-      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+      - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
.github/workflows/push_complement_image.yml (vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead

name: Store complement-synapse image in ghcr.io
on:
  push:
    branches: [ "master" ]
  schedule:
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      branch:
        required: true
        default: 'develop'
        type: choice
        options:
          - develop
          - master

# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build and push complement image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout specific branch (debug build)
        uses: actions/checkout@v3
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
        uses: actions/checkout@v3
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
        uses: actions/checkout@v3
        if: github.event_name == 'push'
        with:
          ref: master
      - name: Login to registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Work out labels for complement image
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}/complement-synapse
          tags: |
            type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
            type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
            type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
            type=sha,format=long
      - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
        run: scripts-dev/complement.sh --build-only
      - name: Tag and push generated image
        run: |
          for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
            echo "tag and push $TAG"
            docker tag complement-synapse $TAG
            docker push $TAG
          done
.github/workflows/tests.yml (vendored, 108 lines changed)
@@ -27,12 +27,15 @@ jobs:
             rust:
               - 'rust/**'
               - 'Cargo.toml'
+              - 'Cargo.lock'

   check-sampleconfig:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - uses: matrix-org/setup-python-poetry@v1
         with:
           extras: "all"
@@ -44,6 +47,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors

@@ -68,6 +73,8 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - run: "pip install 'towncrier>=18.6.0rc1'"
       - run: scripts-dev/check-newsfragment.sh
         env:
@@ -93,14 +100,38 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
-          override: true
           components: clippy
       - uses: Swatinem/rust-cache@v2

-      - run: cargo clippy
+      - run: cargo clippy -- -D warnings

+  # We also lint against a nightly rustc so that we can lint the benchmark
+  # suite, which requires a nightly compiler.
+  lint-clippy-nightly:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.rust == 'true' }}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Install Rust
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+        with:
+          toolchain: nightly-2022-12-01
+          components: clippy
+      - uses: Swatinem/rust-cache@v2
+
+      - run: cargo clippy --all-features -- -D warnings
+
   lint-rustfmt:
     runs-on: ubuntu-latest
@@ -111,11 +142,13 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: 1.58.1
-          override: true
-          components: rustfmt
+          toolchain: 1.58.1
+          components: rustfmt
       - uses: Swatinem/rust-cache@v2

       - run: cargo fmt --check
@@ -143,6 +176,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - id: get-matrix
         run: .ci/scripts/calculate_jobs.py
     outputs:
@@ -162,11 +197,25 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
+        # 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
+        # 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
         run: |
           docker run -d -p 5432:5432 \
+            --tmpfs /var/lib/postgres:rw,size=6144m \
+            --mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.job.postgres-version }}

+      - name: Install Rust
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+        with:
+          toolchain: 1.58.1
+      - uses: Swatinem/rust-cache@v2
+
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.job.python-version }}
@@ -175,10 +224,10 @@ jobs:
         if: ${{ matrix.job.postgres-version }}
         timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
-      - run: poetry run trial --jobs=2 tests
+      - run: poetry run trial --jobs=6 tests
         env:
           SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
-          SYNAPSE_POSTGRES_HOST: localhost
+          SYNAPSE_POSTGRES_HOST: /var/run/postgresql
           SYNAPSE_POSTGRES_USER: postgres
           SYNAPSE_POSTGRES_PASSWORD: postgres
       - name: Dump logs
@@ -203,10 +252,12 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2

       # There aren't wheels for some of the older deps, so we need to install
@@ -245,7 +296,7 @@ jobs:
           python-version: '3.7'
           extras: "all test"

-      - run: poetry run trial -j2 tests
+      - run: poetry run trial -j6 tests
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}
@@ -319,10 +370,12 @@ jobs:
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2

       - name: Run SyTest
@@ -383,10 +436,10 @@ jobs:
       matrix:
         include:
           - python-version: "3.7"
-            postgres-version: "10"
+            postgres-version: "11"

-          - python-version: "3.10"
-            postgres-version: "14"
+          - python-version: "3.11"
+            postgres-version: "15"

     services:
       postgres:
@@ -404,6 +457,15 @@ jobs:

     steps:
       - uses: actions/checkout@v3
+      - name: Add PostgreSQL apt repository
+        # We need a version of pg_dump that can handle the version of
+        # PostgreSQL being tested against. The Ubuntu package repository lags
+        # behind new releases, so we have to use the PostgreSQL apt repository.
+        # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+        run: |
+          sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+          wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+          sudo apt-get update
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -451,10 +513,12 @@ jobs:
           path: synapse

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2

       - name: Prepare Complement's Prerequisites
@@ -477,10 +541,12 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2

       - run: cargo test
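A detail worth noting in the trial hunks: changing `SYNAPSE_POSTGRES_HOST` from `localhost` to `/var/run/postgresql` works because libpq-based clients treat a host value that starts with a slash as a unix socket directory rather than a TCP hostname, which is what lets the tests bypass docker-proxy. A minimal illustration with psycopg2 (the credentials mirror the workflow; running it assumes a local server listening on that socket):

```python
import psycopg2

# A host beginning with "/" names the directory holding the PostgreSQL
# unix socket, so no TCP connection (or docker-proxy hop) is involved.
conn = psycopg2.connect(
    host="/var/run/postgresql",  # socket directory, not a hostname
    user="postgres",
    password="postgres",
    dbname="postgres",
)
print(conn.get_dsn_parameters())
conn.close()
```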
.github/workflows/triage_labelled.yml (vendored, 54 lines changed)
@@ -11,34 +11,34 @@ jobs:
     if: >
       contains(github.event.issue.labels.*.name, 'X-Needs-Info')
     steps:
-      - uses: octokit/graphql-action@v2.x
-        id: add_to_project
+      - uses: actions/add-to-project@main
+        id: add_project
         with:
-          headers: '{"GraphQL-Features": "projects_next_graphql"}'
-          query: |
-            mutation {
-              updateProjectV2ItemFieldValue(
-                input: {
-                  projectId: $projectid
-                  itemId: $contentid
-                  fieldId: $fieldid
-                  value: {
-                    singleSelectOptionId: "Todo"
-                  }
-                }
-              ) {
-                projectV2Item {
-                  id
-                }
-              }
-            }
-          projectid: ${{ env.PROJECT_ID }}
-          contentid: ${{ github.event.issue.node_id }}
-          fieldid: ${{ env.FIELD_ID }}
-          optionid: ${{ env.OPTION_ID }}
-        env:
-          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
-          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
-          FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
-          OPTION_ID: "ba22e43c"
+          project-url: "https://github.com/orgs/matrix-org/projects/67"
+          github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+      - name: Set status
+        env:
+          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+        run: |
+          gh api graphql -f query='
+            mutation(
+              $project: ID!
+              $item: ID!
+              $fieldid: ID!
+              $columnid: String!
+            ) {
+              updateProjectV2ItemFieldValue(
+                input: {
+                  projectId: $project
+                  itemId: $item
+                  fieldId: $fieldid
+                  value: {
+                    singleSelectOptionId: $columnid
+                  }
+                }
+              ) {
+                projectV2Item {
+                  id
+                }
+              }
+            }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
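The replacement step drives the `updateProjectV2ItemFieldValue` mutation through the `gh` CLI. For reference, an equivalent call made directly against the GitHub GraphQL endpoint would look roughly like this (the token and item id are placeholders; the project, field, and option ids are the ones pinned in the workflow):

```python
import requests

TOKEN = "ghp_example"  # placeholder; the workflow uses ELEMENT_BOT_TOKEN
MUTATION = """
mutation($project: ID!, $item: ID!, $fieldid: ID!, $columnid: String!) {
  updateProjectV2ItemFieldValue(
    input: {projectId: $project, itemId: $item, fieldId: $fieldid,
            value: {singleSelectOptionId: $columnid}}
  ) { projectV2Item { id } }
}
"""

resp = requests.post(
    "https://api.github.com/graphql",
    json={
        "query": MUTATION,
        "variables": {
            "project": "PVT_kwDOAIB0Bs4AFDdZ",
            "item": "PVTI_exampleItemId",  # itemId output of the add-to-project step
            "fieldid": "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4",
            "columnid": "ba22e43c",
        },
    },
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
print(resp.json())
```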
.github/workflows/twisted_trunk.yml (vendored, 20 lines changed)
@@ -18,10 +18,9 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       - uses: matrix-org/setup-python-poetry@v1
@@ -44,10 +43,9 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       - uses: matrix-org/setup-python-poetry@v1
@@ -84,10 +82,9 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
-          toolchain: stable
-          override: true
+          toolchain: stable
       - uses: Swatinem/rust-cache@v2

       - name: Patch dependencies
@@ -151,12 +148,11 @@ jobs:
         run: |
           set -x
           DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
-          pipx install poetry==1.1.14
+          pipx install poetry==1.2.0

           poetry remove -n twisted
           poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
           poetry lock --no-update
-          # NOT IN 1.1.14 poetry lock --check
         working-directory: synapse

       - run: |
@@ -178,7 +174,7 @@ jobs:

     steps:
       - uses: actions/checkout@v3
-      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+      - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
CHANGES.md (403 lines changed)
@@ -1,3 +1,402 @@
Synapse 1.74.0 (2022-12-20)
===========================

Improved Documentation
----------------------

- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712))


Synapse 1.74.0rc1 (2022-12-13)
==============================

Features
--------

- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))


Bugfixes
--------

- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
- Suppress a spurious warning when `POST /rooms/<room_id>/<membership>/`, `POST /join/<room_id_or_alias>`, or the unspecced `PUT /join/<room_id_or_alias>/<txn_id>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))


Improved Documentation
----------------------

- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))


Internal Changes
----------------

- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))


Synapse 1.73.0 (2022-12-06)
===========================

Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.

No significant changes since 1.73.0rc2.


Synapse 1.73.0rc2 (2022-12-01)
==============================

Bugfixes
--------

- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))


Synapse 1.73.0rc1 (2022-11-29)
==============================

Features
--------

- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))


Bugfixes
--------

- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))


Improved Documentation
----------------------

- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))


Deprecations and Removals
-------------------------

- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))


Internal Changes
----------------

- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
  ([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).


Synapse 1.72.0 (2022-11-22)
===========================

Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).

Bugfixes
--------

- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))


Synapse 1.72.0rc1 (2022-11-16)
==============================

Features
--------

- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))


Bugfixes
--------

- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))


Updates to the Docker image
---------------------------

- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))


Improved Documentation
----------------------

- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
|
||||
- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
|
||||
|
||||
|
||||
Deprecations and Removals
-------------------------

- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))

Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
- Add the ability to override which worker types the `complement.sh` command-line script requests. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
- Use a maintained GitHub action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
- Clean up old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
- Clean up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
- Update a docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
- Fix mypy errors introduced by bumping the locked versions of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))

Synapse 1.71.0 (2022-11-08)
===========================

Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
They will be removed altogether in Synapse 1.73.0.
If they have not done so already, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.

**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
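
Administrators who are unsure which PostgreSQL version a deployment is running can check from the command line. A minimal sketch (the connection details here are placeholders for whatever your deployment uses; `SELECT version()` itself is standard PostgreSQL):

```sh
# Print the PostgreSQL server version string; adjust host/user/database
# to match your Synapse deployment.
psql -h localhost -U synapse_user synapse -c "SELECT version();"
```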

No significant changes since 1.71.0rc2.

Synapse 1.71.0rc2 (2022-11-04)
==============================

Improved Documentation
----------------------

- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))

Deprecations and Removals
-------------------------

- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))

Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))

Synapse 1.71.0rc1 (2022-11-01)
==============================

Features
--------

- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), push rules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))

Bugfixes
--------

- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))

Improved Documentation
----------------------

- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
- Add workers settings to the [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
- Update the docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))

Internal Changes
----------------

- Remove an unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
- Save login tokens in the database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
- Fix a type annotation causing an import-time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
- Refactor the [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with a standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
- Add the initial power level event to the batch of bulk-persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
- Build wheels on macOS 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
- Add a Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))

<details>
<summary>Dependency updates</summary>

Runtime:

- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))

Tooling and CI:

- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))

</details>

Synapse 1.70.1 (2022-10-28)
===========================

This release fixes some regressions that were discovered in 1.70.0.

[#14300](https://github.com/matrix-org/synapse/issues/14300) was previously reported to be a regression in 1.70.0 as well. However, we have since concluded that it was limited to the reporter and thus have not needed to include any fix for it in 1.70.1.

Bugfixes
--------

- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))

Synapse 1.70.0 (2022-10-26)
===========================

No significant changes since 1.70.0rc2.


Synapse 1.70.0rc2 (2022-10-25)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))

Internal Changes
----------------

- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))

Synapse 1.70.0rc1 (2022-10-19)
==============================

@@ -13,7 +412,7 @@ Features

 - The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
 - Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
 - Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
-- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874). ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
 - Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
 - Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))

@@ -21,7 +420,7 @@ Features

 Bugfixes
 --------

-- Prevent device names from appearing in device list updates when `allow_device_name_lookup_over_federation` is `false`. ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
 - Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
 - Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
 - Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))

Cargo.lock (generated): 44 lines changed

@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

 [[package]]
 name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
 dependencies = [
  "digest",
 ]
@@ -194,9 +194,9 @@ dependencies = [

 [[package]]
 name = "pyo3"
-version = "0.17.2"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
 dependencies = [
  "anyhow",
  "cfg-if",
@@ -212,9 +212,9 @@ dependencies = [

 [[package]]
 name = "pyo3-build-config"
-version = "0.17.2"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
 dependencies = [
  "once_cell",
  "target-lexicon",
@@ -222,9 +222,9 @@ dependencies = [

 [[package]]
 name = "pyo3-ffi"
-version = "0.17.2"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
 dependencies = [
  "libc",
  "pyo3-build-config",
@@ -243,9 +243,9 @@ dependencies = [

 [[package]]
 name = "pyo3-macros"
-version = "0.17.2"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
 dependencies = [
  "proc-macro2",
  "pyo3-macros-backend",
@@ -255,9 +255,9 @@ dependencies = [

 [[package]]
 name = "pyo3-macros-backend"
-version = "0.17.2"
+version = "0.17.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -294,9 +294,9 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.6.0"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.147"
+version = "1.0.150"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.147"
+version = "1.0.150"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -343,9 +343,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.86"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
 dependencies = [
  "itoa",
  "ryu",
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"

 [[package]]
 name = "syn"
-version = "1.0.102"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -1 +0,0 @@
-Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper.
@@ -1 +0,0 @@
-Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins.
@@ -1 +0,0 @@
-Fix a long-standing bug where Synapse would count codepoints instead of bytes when validating the size of some fields.
@@ -1 +0,0 @@
-Bump flake8-bugbear from 21.3.2 to 22.9.23.
@@ -1 +0,0 @@
-Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid).
@@ -1 +0,0 @@
-Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type).
@@ -1 +0,0 @@
-Bump types-opentracing from 2.4.7 to 2.4.10.
@@ -1 +0,0 @@
-Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are.
@@ -1 +0,0 @@
-Show erasure status when listing users in the Admin API.
@@ -1 +0,0 @@
-Add initial power level event to batch of bulk persisted events when creating a new room.
@@ -1 +0,0 @@
-Refactor `/key/` endpoints to use `RestServlet` classes.
@@ -1 +0,0 @@
-Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI.
@@ -1 +0,0 @@
-Build wheels on macos 11, not 10.15.
@@ -1,2 +0,0 @@
-Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal, and debug logging was enabled.
-
@@ -1 +0,0 @@
-Add debugging to help diagnose lost device-list-update.
@@ -1 +0,0 @@
-Bump pysaml2 from 7.1.2 to 7.2.1.
@@ -1 +0,0 @@
-Bump jinja2 from 3.0.3 to 3.1.2.
@@ -1 +0,0 @@
-Bump types-requests from 2.28.11 to 2.28.11.2.
@@ -1 +0,0 @@
-Bump setuptools-rust from 1.5.1 to 1.5.2.
@@ -1 +0,0 @@
-Bump prometheus-client from 0.14.0 to 0.15.0.
@@ -1 +0,0 @@
-Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0.
@@ -1 +0,0 @@
-Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0.
@@ -1 +0,0 @@
-Bump serde from 1.0.145 to 1.0.147.
@@ -1 +0,0 @@
-Bump anyhow from 1.0.65 to 1.0.66.
(File diff suppressed because it is too large.)

debian/changelog (vendored): 80 lines changed

@@ -1,3 +1,83 @@
matrix-synapse-py3 (1.74.0) stable; urgency=medium

  * New Synapse release 1.74.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Dec 2022 16:07:38 +0000

matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium

  * New dependency on libicu-dev to provide improved results for user
    search.
  * New Synapse release 1.74.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Dec 2022 13:30:01 +0000

matrix-synapse-py3 (1.73.0) stable; urgency=medium

  * New Synapse release 1.73.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Dec 2022 11:48:56 +0000

matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium

  * New Synapse release 1.73.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 01 Dec 2022 10:02:19 +0000

matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium

  * New Synapse release 1.73.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Nov 2022 12:28:13 +0000

matrix-synapse-py3 (1.72.0) stable; urgency=medium

  * New Synapse release 1.72.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Nov 2022 10:57:30 +0000

matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium

  * New Synapse release 1.72.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000

matrix-synapse-py3 (1.71.0) stable; urgency=medium

  * New Synapse release 1.71.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000

matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium

  * New Synapse release 1.71.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000

matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium

  * New Synapse release 1.71.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000

matrix-synapse-py3 (1.70.1) stable; urgency=medium

  * New Synapse release 1.70.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100

matrix-synapse-py3 (1.70.0) stable; urgency=medium

  * New Synapse release 1.70.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100

matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium

  * New Synapse release 1.70.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100

matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium

  * New Synapse release 1.70.0rc1.

debian/control (vendored): 2 lines changed

@@ -8,6 +8,8 @@ Build-Depends:
  dh-virtualenv (>= 1.1),
  libsystemd-dev,
  libpq-dev,
+ libicu-dev,
+ pkg-config,
  lsb-release,
  python3-dev,
  python3,

@@ -43,7 +43,7 @@ RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
-    build-essential cargo git libffi-dev libssl-dev \
+    build-essential git libffi-dev libssl-dev \
   && rm -rf /var/lib/apt/lists/*

 # We install poetry in its own build stage to avoid its dependencies conflicting with
@@ -97,6 +97,8 @@ RUN \
   zlib1g-dev \
   git \
   curl \
+  libicu-dev \
+  pkg-config \
   && rm -rf /var/lib/apt/lists/*

@@ -165,6 +167,7 @@ RUN \
   libwebp6 \
   xmlsec1 \
   libjemalloc2 \
+  libicu67 \
   libssl-dev \
   openssl \
   && rm -rf /var/lib/apt/lists/*

@@ -84,6 +84,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
   python3-venv \
   sqlite3 \
   libpq-dev \
+  libicu-dev \
+  pkg-config \
   xmlsec1

 # Install rust and ensure it's in the PATH
@@ -1,6 +1,7 @@
 # syntax=docker/dockerfile:1

 ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION

 # first of all, we create a base image with an nginx which we can copy into the
 # target image. For repeated rebuilds, this is much faster than apt installing
@@ -23,7 +24,7 @@ FROM debian:bullseye-slim AS deps_base
 FROM redis:6-bullseye AS redis_base

 # now build the final image, based on the the regular Synapse docker image
-FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+FROM $FROM

 # Install supervisord with pip instead of apt, to avoid installing a second
 # copy of python.
@@ -7,8 +7,9 @@
 # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse

 ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION

-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+FROM $FROM
 # First of all, we copy postgres server from the official postgres image,
 # since for repeated rebuilds, this is much faster than apt installing
 # postgres each time.
@@ -45,7 +45,12 @@ esac

 if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
-  # Specify the workers to test with
-  export SYNAPSE_WORKER_TYPES="\
+  # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+  # utilizing WORKERS=1 for backwards compatibility.
+  # -n True if the length of string is non-zero.
+  # -z True if the length of string is zero.
+  if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+    export SYNAPSE_WORKER_TYPES="\
       event_persister, \
       event_persister, \
       background_worker, \
@@ -61,6 +66,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
       appservice, \
       pusher"

+  fi
+  log "Workers requested: $SYNAPSE_WORKER_TYPES"
   # Improve startup times by using a launcher based on fork()
   export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
 else
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
 ## Experimental Features ##

 experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
   # Enable history backfilling support
   msc2716_enabled: true
   # server-side support for partial state in /send_join responses
@@ -102,8 +100,8 @@ experimental_features:
   # client-side support for partial state in /send_join responses
   faster_joins: true
   {% endif %}
-  # Enable jump to date endpoint
-  msc3030_enabled: true
+  # Filtering /messages by relation type.
+  msc3874_enabled: true

 server_notices:
   system_mxid_localpart: _server
@@ -20,7 +20,7 @@
 # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
 # * SYNAPSE_REPORT_STATS: Whether to report stats.
 # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#   below. Leave empty for no workers, or set to '*' for all possible workers.
+#   below. Leave empty for no workers.
 # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #   will be treated as Application Service registration files.
 # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -50,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader

 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080

+# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
+# Watching /_matrix/client needs a "client" listener
+# Watching /_matrix/federation needs a "federation" listener
+# Watching /_matrix/media and related needs a "media" listener
+# Stream Writers require "client" and "replication" listeners because they
+# have to attach by instance_map to the master process and have client endpoints.
 WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "pusher": {
-        "app": "synapse.app.pusher",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"start_pushers": False},
+        "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
     "user_dir": {
@@ -79,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "^/_synapse/admin/v1/media/.*$",
         "^/_synapse/admin/v1/quarantine_media/.*$",
     ],
-    "shared_extra_conf": {"enable_media_repo": False},
+    # The first configured media worker will run the media background jobs
+    "shared_extra_conf": {
+        "enable_media_repo": False,
+        "media_instance_running_background_jobs": "media_repository1",
+    },
     "worker_extra_conf": "enable_media_repo: true",
 },
 "appservice": {
@@ -90,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "worker_extra_conf": "",
 },
 "federation_sender": {
-    "app": "synapse.app.federation_sender",
+    "app": "synapse.app.generic_worker",
     "listener_resources": [],
     "endpoint_patterns": [],
-    "shared_extra_conf": {"send_federation": False},
+    "shared_extra_conf": {},
     "worker_extra_conf": "",
 },
 "synchrotron": {
@@ -131,6 +140,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
     "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
     "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+    "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
     "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
 ],
 "shared_extra_conf": {},
@@ -154,6 +164,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "^/_matrix/federation/(v1|v2)/invite/",
     "^/_matrix/federation/(v1|v2)/query_auth/",
     "^/_matrix/federation/(v1|v2)/event_auth/",
+    "^/_matrix/federation/v1/timestamp_to_event/",
     "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
     "^/_matrix/federation/(v1|v2)/user/devices/",
     "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -200,14 +211,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "worker_extra_conf": "",
 },
 "frontend_proxy": {
-    "app": "synapse.app.frontend_proxy",
+    "app": "synapse.app.generic_worker",
     "listener_resources": ["client", "replication"],
     "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
     "shared_extra_conf": {},
-    "worker_extra_conf": (
-        "worker_main_http_uri: http://127.0.0.1:%d"
-        % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
-    ),
+    "worker_extra_conf": "",
 },
+"account_data": {
+    "app": "synapse.app.generic_worker",
+    "listener_resources": ["client", "replication"],
+    "endpoint_patterns": [
+        "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+        "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+    ],
+    "shared_extra_conf": {},
+    "worker_extra_conf": "",
+},
+"presence": {
+    "app": "synapse.app.generic_worker",
+    "listener_resources": ["client", "replication"],
+    "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+    "shared_extra_conf": {},
+    "worker_extra_conf": "",
+},
+"receipts": {
+    "app": "synapse.app.generic_worker",
+    "listener_resources": ["client", "replication"],
+    "endpoint_patterns": [
+        "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+        "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+    ],
+    "shared_extra_conf": {},
+    "worker_extra_conf": "",
+},
+"to_device": {
+    "app": "synapse.app.generic_worker",
+    "listener_resources": ["client", "replication"],
+    "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+    "shared_extra_conf": {},
+    "worker_extra_conf": "",
+},
+"typing": {
+    "app": "synapse.app.generic_worker",
+    "listener_resources": ["client", "replication"],
+    "endpoint_patterns": [
+        "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+    ],
+    "shared_extra_conf": {},
+    "worker_extra_conf": "",
+},
 }

@@ -271,14 +322,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
         outfile.write(rendered)


-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
     shared_config: dict,
     worker_type: str,
     worker_name: str,
     worker_port: int,
 ) -> None:
     """Given a dictionary representing a config file shared across all workers,
-    append sharded worker information to it for the current worker_type instance.
+    append appropriate worker information to it for the current worker_type instance.

     Args:
         shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -309,9 +360,19 @@
             "port": worker_port,
         }

-    elif worker_type == "media_repository":
-        # The first configured media worker will run the media background jobs
-        shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+        # Update the list of stream writers
+        # It's convenient that the name of the worker type is the same as the stream to write
+        shared_config.setdefault("stream_writers", {}).setdefault(
+            worker_type, []
+        ).append(worker_name)
+
+        # Map of stream writer instance names to host/ports combos
+        # For now, all stream writers need http replication ports
+        instance_map[worker_name] = {
+            "host": "localhost",
+            "port": worker_port,
+        }


 def generate_base_homeserver_config() -> None:
@@ -421,8 +482,7 @@ def generate_worker_files(
         if worker_config:
             worker_config = worker_config.copy()
         else:
-            log(worker_type + " is an unknown worker type! It will be ignored")
-            continue
+            error(worker_type + " is an unknown worker type! Please fix!")

         new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
         worker_type_counter[worker_type] = new_worker_count
@@ -441,11 +501,11 @@ def generate_worker_files(

         # Check if more than one instance of this worker type has been specified
         worker_type_total_count = worker_types.count(worker_type)
-        if worker_type_total_count > 1:
-            # Update the shared config with sharding-related options if necessary
-            add_sharding_to_shared_config(
-                shared_config, worker_type, worker_name, worker_port
-            )
+
+        # Update the shared config with sharding-related options if necessary
+        add_worker_roles_to_shared_config(
+            shared_config, worker_type, worker_name, worker_port
+        )

         # Enable the worker in supervisord
         worker_descriptors.append(worker_config)
docker/editable.Dockerfile (new file): 75 lines

@@ -0,0 +1,75 @@
# syntax=docker/dockerfile:1
# This dockerfile builds an editable install of Synapse.
#
# Used by `complement.sh`. Not suitable for production use.

ARG PYTHON_VERSION=3.9

###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye

# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
RUN \
  --mount=type=cache,target=/var/cache/apt,sharing=locked \
  --mount=type=cache,target=/var/lib/apt,sharing=locked \
  apt-get update -qq && apt-get install -yqq \
    build-essential \
    libffi-dev \
    libjpeg-dev \
    libpq-dev \
    libssl-dev \
    libwebp-dev \
    libxml++2.6-dev \
    libxslt1-dev \
    openssl \
    zlib1g-dev \
    git \
    curl \
    gosu \
    libjpeg62-turbo \
    libpq5 \
    libwebp6 \
    xmlsec1 \
    libjemalloc2 \
    && rm -rf /var/lib/apt/lists/*
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


# Make a base copy of the editable source tree, so that we have something to
# install and build now — even though it's going to be covered up by a mount
# at runtime.
COPY synapse /editable-src/synapse/
COPY rust /editable-src/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/

RUN pip install poetry
RUN poetry config virtualenvs.create false
RUN cd /editable-src && poetry install --extras all

# Make copies of useful things for inspection:
# - the Rust module (must be copied to the editable source tree before startup)
# - poetry.lock is useful for checking if dependencies have changed.
RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
RUN cp /editable-src/poetry.lock /poetry.lock.bak


### Extra setup from original Dockerfile
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf

EXPOSE 8008/tcp 8009/tcp 8448/tcp

ENTRYPOINT ["/start.py"]

HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD curl -fSs http://localhost:8008/health || exit 1
@@ -9,6 +9,8 @@
 - [Configuring a Reverse Proxy](reverse_proxy.md)
 - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
 - [Configuring a Turn Server](turn-howto.md)
+  - [coturn TURN server](setup/turn/coturn.md)
+  - [eturnal TURN server](setup/turn/eturnal.md)
 - [Delegation](delegate.md)

 # Upgrading
@@ -1197,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
```

_Added in Synapse 1.68.0._


### Find a user based on their Third Party ID (ThreePID or 3PID)

The API is:

```
GET /_synapse/admin/v1/threepid/$medium/users/$address
```

When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:

```json
{
    "user_id": "@hello:example.org"
}
```

**Parameters**

The following parameters should be set in the URL:

- `medium` - Kind of third-party ID, either `email` or `msisdn`.
- `address` - Value of the third-party ID.

The `address` may contain characters that are not URL-safe, so it is advised to URL-encode those parameters.
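
As an illustrative sketch, the address and access token below are placeholders, and the admin API must be called while authenticated as a server admin:

```sh
# Look up the Matrix user bound to the email address admin@example.org.
# Note the URL-encoding of the address ("@" becomes "%40").
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/threepid/email/users/admin%40example.org"
```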

**Errors**

Returns a `404` HTTP status code if no user was found, with a response body like this:

```json
{
    "errcode":"M_NOT_FOUND",
    "error":"User not found"
}
```

_Added in Synapse 1.72.0._

@@ -24,6 +24,8 @@ The code of Synapse is written in Python 3. To do pretty much anything, you'll n

 Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.

+Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
+
 The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).

 For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
@@ -324,6 +326,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data

 - Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
 - Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+  - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
+    types you wish to test: a simple comma-delimited string containing the worker types
+    defined in the `WORKERS_CONFIG` template
+    [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+    A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`;
+    see the sketch just after this list.
+    See the [worker documentation](../workers.md) for additional information on workers.
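
For instance, a sketch combining these variables (assuming the script lives at `scripts-dev/complement.sh` in a Synapse checkout, with a Complement checkout available as its documentation describes):

```sh
# Run the Complement tests against a workerised Synapse (WORKERS=1 implies Postgres),
# restricting the deployment to a known-safe set of worker types.
WORKERS=1 WORKER_TYPES="federation_inbound, federation_sender, synchrotron" \
    ./scripts-dev/complement.sh
```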

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:

@@ -209,6 +209,9 @@ altogether in Synapse v1.73.0.**
 | synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
 | synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
 | synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |

 Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
 ---------------------------------------------------------------------------------
@@ -49,6 +49,13 @@ setting in your configuration file.
 See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
 the text below for example configurations for specific providers.

+## OIDC Back-Channel Logout
+
+Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
+
+This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user's session.
+
+This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
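
As a minimal sketch, assuming a provider is already configured along the lines of the sample configs below (all values here are illustrative), only the one extra property is needed:

```yaml
oidc_providers:
  - idp_id: my_idp
    idp_name: "My IdP"
    issuer: "https://idp.example.com/"
    client_id: "synapse"
    client_secret: "<client secret issued by the provider>"
    # Accept Back-Channel Logout notifications for this provider:
    backchannel_logout_enabled: true
```

The provider must, in turn, be configured to send its logout notifications to `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`.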

## Sample configs

Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:

 [Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.

+Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse, so that Synapse users get logged out when they log out from Keycloak.
+This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
+
 Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.

 1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
 | Client Protocol | `openid-connect` |
 | Access Type | `confidential` |
 | Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |

 5. Click `Save`
 6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
     user_mapping_provider:
       config:
         localpart_template: "{{ user.preferred_username }}"
         display_name_template: "{{ user.name }}"
+    backchannel_logout_enabled: true # Optional
 ```

### Auth0

[Auth0][auth0] is a hosted SaaS IdP solution.

@@ -576,3 +590,44 @@ oidc_providers:
     display_name_template: "{{ user.first_name }} {{ user.last_name }}"
     email_template: "{{ user.email }}"
```

### Mastodon

[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.

The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using `curl`.

This example assumes that:
* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
* Synapse will be registered as an app named `my_synapse_app`.

Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
```sh
curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
```

You should receive a response similar to the following. Make sure to save it.
```json
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```

As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:

```yaml
oidc_providers:
  - idp_id: my_mastodon
    idp_name: "Mastodon Instance Example"
    discover: false
    issuer: "https://your.mastodon.instance.url/@admin"
    client_id: "someclientid_123"
    client_secret: "someclientsecret_123"
    authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
    token_endpoint: "https://your.mastodon.instance.url/oauth/token"
    userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
    scopes: ["read"]
    user_mapping_provider:
      config:
        subject_claim: "id"
```

Note that the fields `client_id` and `client_secret` are taken from the `curl` response above.

@@ -1,6 +1,7 @@
 # Using Postgres

-Synapse supports PostgreSQL versions 10 or later.
+The minimum supported version of PostgreSQL is determined by the [Dependency
+Deprecation Policy](deprecation_policy.md).

 ## Install postgres client libraries
@@ -79,6 +79,9 @@ server {
         # Nginx by default only allows file uploads up to 1M in size
         # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
         client_max_body_size 50M;
+
+        # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+        proxy_http_version 1.1;
     }
 }
 ```

@@ -6,7 +6,7 @@
 # Synapse also supports structured logging for machine readable logs which can
 # be ingested by ELK stacks. See [2] for details.
 #
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
 # [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

 version: 1
@@ -84,7 +84,9 @@ file when you upgrade the Debian package to a later version.

 ##### Downstream Debian packages

-Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+Andrej Shadura maintains a
+[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
+the Debian repositories.
 For `bookworm` and `sid`, it can be installed simply with:

@@ -100,23 +102,27 @@ for information on how to use backports.
 ##### Downstream Ubuntu packages

 We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
+at this time, as they are [old and suffer from known security vulnerabilities](
+https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
+).
 The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

 #### Fedora

-Synapse is in the Fedora repositories as `matrix-synapse`:
+Synapse is in the Fedora repositories as
+[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):

 ```sh
 sudo dnf install matrix-synapse
 ```

-Oleg Girko provides Fedora RPMs at
+Additionally, Oleg Girko provides Fedora RPMs at
 <https://obs.infoserver.lv/project/monitor/matrix-synapse>

 #### OpenSUSE

-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+Synapse is in the OpenSUSE repositories as
+[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):

 ```sh
 sudo zypper install matrix-synapse
@@ -151,7 +157,8 @@ sudo pip install py-bcrypt

#### Void Linux

Synapse can be found in the void repositories as
['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):

```sh
xbps-install -Su
xbps-install -S synapse
```

@@ -271,7 +278,7 @@ Installing prerequisites on Ubuntu or Debian:
```sh
sudo apt install build-essential python3-dev libffi-dev \
                     python3-pip python3-setuptools sqlite3 \
                     libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev
```

##### ArchLinux

@@ -280,7 +287,7 @@ Installing prerequisites on ArchLinux:

```sh
sudo pacman -S base-devel python python-pip \
               python-setuptools python-virtualenv sqlite3 icu
```

##### CentOS/Fedora

@@ -290,7 +297,8 @@ Installing prerequisites on CentOS or Fedora Linux:
```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                 libwebp-devel libxml2-devel libxslt-devel libpq-devel \
                 python3-virtualenv libffi-devel openssl-devel python3-devel \
                 libicu-devel
sudo dnf groupinstall "Development Tools"
```

@@ -303,8 +311,12 @@ You may need to install the latest Xcode developer tools:
xcode-select --install
```

Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) for them.

You may need to install icu, and make the icu binaries and libraries accessible.
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.

On ARM-based Macs you may also need to install libjpeg and libpq:
```sh
brew install jpeg libpq
```

@@ -325,7 +337,8 @@ Installing prerequisites on openSUSE:
```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
               python-devel libffi-devel libopenssl-devel libjpeg62-devel \
               libicu-devel
```

##### OpenBSD

docs/setup/turn/coturn.md (new file, 188 lines)
@@ -0,0 +1,188 @@
# coturn TURN server

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian and Ubuntu based distributions

Just install the debian package:

```sh
sudo apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   sudo make install
   ```

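If you installed from source and want `coturn` managed by systemd (as the restart step below assumes), a minimal unit file might look like the following sketch. This is an illustration built on assumptions, not part of the upstream instructions: the binary path matches the default `make install` prefix and the config path matches the next section.

```
[Unit]
Description=coturn TURN server
After=network.target

[Service]
# Assumes the default install prefix and the config file created below.
ExecStart=/usr/local/bin/turnserver -c /etc/turnserver.conf
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
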
### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure `coturn` to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, `coturn` can be configured to write to a
   logfile - check the example config file supplied with `coturn`.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (By default: 3478 and 5349 for TURN
   traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
   for the UDP relay.)

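   As a hedged illustration only (it assumes `ufw` and the default ports
   above), the corresponding firewall rules might look like:

   ```sh
   # TURN listeners; without /tcp or /udp, ufw opens both protocols.
   sudo ufw allow 3478
   sudo ufw allow 5349
   # UDP relay range
   sudo ufw allow 49152:65535/udp
   ```
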
1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure `coturn` to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `coturn` to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     sudo systemctl restart coturn
     ```

   * If you built from source:

     ```sh
     /usr/local/bin/turnserver -o
     ```

docs/setup/turn/eturnal.md (new file, 170 lines)
@@ -0,0 +1,170 @@
# eturnal TURN server

The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
(which implements the TURN REST API).

## `eturnal` setup

### Initial installation

The `eturnal` TURN server implementation is available from a variety of sources
such as native package managers, binary packages, installation from source or
[container image](https://eturnal.net/documentation/code/docker.html). They are
all described [here](https://github.com/processone/eturnal#installation).

Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
are available as well.

### Configuration

After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
here: `/etc/eturnal.yml` (and, if not found there, there is a backup file here:
`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
format. The file contains further explanations.

Here are some hints on how to configure eturnal on your [host machine](https://github.com/processone/eturnal#configuration)
or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
You may also dive deeper into the [reference documentation](https://eturnal.net/documentation/).

`eturnal` runs out of the box with the default configuration. To enable TURN and
to integrate it with your homeserver, some aspects in `eturnal`'s default configuration file
must be edited:

1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
   and eturnal's shared `secret` for authentication

   Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
   configuration file:

   ```yaml
   secret: "long-and-cryptic"     # Shared secret, CHANGE THIS.
   ```

   One way to generate a `secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

1. Public IP address

   If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. `eturnal` tries to autodetect the public IP address,
   however, it may also be configured by uncommenting and adjusting this line, so
   `eturnal` advertises that address to connecting clients:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `eturnal` to advertise each available address:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. Logging

   If `eturnal` was started by systemd, log files are written into the
   `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
   instead, the `log_dir` option can be set to `stdout` in the configuration file.

1. Security considerations

   Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):

   ```yaml
   ## Reject TURN relaying from/to the following addresses/networks:
   blacklist:                 # This is the default blacklist.
       - "127.0.0.0/8"        # IPv4 loopback.
       - "::1"                # IPv6 loopback.
       - recommended          # Expands to a number of networks recommended to be
                              # blocked, but includes private networks. Those
                              # would have to be 'whitelist'ed if eturnal serves
                              # local clients/peers within such networks.
   ```

   To whitelist IP addresses or specific (private) networks, you need to **add** a
   whitelist part into the configuration file, e.g.:

   ```yaml
   whitelist:
       - "192.168.0.0/16"
       - "203.0.113.113"
       - "2001:db8::/64"
   ```

   The more specific, the better.

1. TURNS (TURN via TLS/DTLS)

   Also consider supporting TLS/DTLS. To do this, adjust the following settings
   in the `eturnal.yml` configuration file (TLS parts should not be commented anymore):

   ```yaml
   listen:
       - ip: "::"
         port: 3478
         transport: udp
       - ip: "::"
         port: 3478
         transport: tcp
       - ip: "::"
         port: 5349
         transport: tls

   ## TLS certificate/key files (must be readable by 'eturnal' user!):
   tls_crt_file: /etc/eturnal/tls/crt.pem
   tls_key_file: /etc/eturnal/tls/key.pem
   ```

   In this case, replace the `turn:` schemes in homeserver's `turn_uris` settings
   with `turns:`. More is described [here](../../usage/configuration/config_documentation.md#turn_uris).

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Firewall

   Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (By default: 3478 and 5349 for TURN
   traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
   for the UDP relay.)

1. Reloading/restarting `eturnal`

   Changes in the configuration file require `eturnal` to reload/restart; this
   can be achieved by:

   ```sh
   eturnalctl reload
   ```

   `eturnal` performs a configuration check before actually reloading/restarting
   and provides hints if something is not correctly configured.

### eturnalctl operations script

`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
which can be called e.g. to check whether the service is up, to restart the service,
to query how many active sessions exist, to change logging behaviour and so on.

Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly: e.g. `/opt/eturnal/bin/eturnalctl info`

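As a brief, hedged sketch of the tasks just mentioned (`eturnalctl info`, `eturnalctl reload` and `eturnalctl loglevel` appear elsewhere in this document; `eturnalctl sessions` is an assumption based on the operations documentation):

```sh
eturnalctl info              # print server status information
eturnalctl sessions          # query the active TURN sessions
eturnalctl loglevel debug    # change logging behaviour at runtime
eturnalctl reload            # re-read the configuration file
```
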
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.

This documentation provides two TURN server configuration examples:

* [coturn](setup/turn/coturn.md)
* [eturnal](setup/turn/eturnal.md)

## Requirements

For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.

Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.

|

## Synapse setup

Your homeserver configuration file needs the following extra keys:

1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)

As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -232,7 +38,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
turn_user_lifetime: 86400000
turn_allow_guests: true

After updating the homeserver configuration, you must restart synapse:

@@ -263,7 +69,7 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
  relay ports (49152-65535 by default).

* Try disabling TLS/DTLS listeners and enable only its (unencrypted)
  TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
  voice & video WebRTC traffic is always encrypted.)

@@ -288,12 +94,19 @@ Here are a few things to try:
  * ensure that your TURN server uses the NAT gateway as its default route.

* Enable more verbose logging, in `coturn` via the `verbose` setting:

  ```
  verbose
  ```

  or with `eturnal` with the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):

  ```yaml
  ## Logging configuration:
  log_level: debug
  ```

  ... and then see if there are any clues in its logs.

* If you are using a browser-based client under Chrome, check

@@ -317,7 +130,7 @@ Here are a few things to try:
  matrix client to your homeserver in your browser's network inspector. In
  the response you should see `username` and `password`. Or:

  * Use the following shell commands for `coturn`:

    ```sh
    secret=staticAuthSecretHere

@@ -327,11 +140,16 @@ Here are a few things to try:
    echo -e "username: $u\npassword: $p"
    ```

    or for `eturnal`:

    ```sh
    eturnalctl credentials
    ```

  * Or (**coturn only**): Temporarily configure `coturn` to accept a static
    username/password. To do this, comment out `use-auth-secret` and
    `static-auth-secret` and add the following:

    ```
    lt-cred-mech

@@ -88,6 +88,98 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.74.0

## Unicode support in user search

This version introduces optional support for an [improved user search dealing with Unicode characters](https://github.com/matrix-org/synapse/pull/14464).

If you want to take advantage of this feature you need to install PyICU,
the ICU native dependency and its development headers
so that PyICU can build since no prebuilt wheels are available.

You can follow [the PyICU documentation](https://pypi.org/project/PyICU/) to do so,
and then do `pip install matrix-synapse[icu]` for a PyPI install.

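As a hedged example, on a Debian-based system the two steps might look like this (the package names are an assumption; consult the PyICU documentation for your platform):

```sh
# ICU native library, development headers and pkg-config, needed to build PyICU
sudo apt install libicu-dev pkg-config
# Synapse with the optional ICU extra
pip install matrix-synapse[icu]
```
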
Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.

# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.

This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.

If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.

# Upgrading to v1.72.0

## Dropping support for PostgreSQL 10

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 10, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 11+.

# Upgrading to v1.71.0

## Removal of the `generate_short_term_login_token` module API method

As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.

Modules relying on it can instead use the `create_login_token` method.

## Changes to the events received by application services (interest)

To align with spec (changed in
[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
only considers local users to be interesting. In other words, the `users` namespace
regex is only applied against local users of the homeserver.

Please note, this probably doesn't affect the expected behavior of your application
service, since an interesting local user in a room still means all messages in the room
(from local or remote users) will still be considered interesting. And matching a room
with the `rooms` or `aliases` namespace regex will still consider all events sent in the
room to be interesting to the application service.

If one of your application service's `users` regexes was intended to match a remote user,
it will no longer match as you expect. The behavioral mismatch between matching all
local users and some remote users is why the spec was changed/clarified and this
caveat is no longer supported.

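For instance, a `users` namespace like the hedged sketch below now only matches local users on `example.org`; a remote user such as `@bot_1:other.example` is no longer considered interesting. (The `namespaces` shape is the standard appservice registration format; the regex and server names are placeholders.)

```yaml
namespaces:
  users:
    - exclusive: false
      regex: "@bot_.*:example\\.org"
```
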
## Legacy Prometheus metric names are now disabled by default

Synapse v1.71.0 disables legacy Prometheus metric names by default.
For administrators that still rely on them and have not yet had chance to update their
uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
it will no longer be possible to re-enable them.

If you do not use metrics or you have already updated your Grafana dashboard(s),
Prometheus console(s) and alerting rule(s), there is no action needed.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).

# Upgrading to v1.69.0

## Changes to the receipts replication streams

@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

## Making an Admin API request
For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.

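As a hedged illustration, a request made from that terminal might look like the following; it assumes Synapse's client listener is reachable on `localhost:8008` and that `<access_token>` belongs to a server admin:

```sh
curl --header "Authorization: Bearer <access_token>" \
    http://localhost:8008/_synapse/admin/v1/server_version
```
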
@@ -79,7 +79,7 @@ Here we can see that the request has been tagged with `GET-37`. (The tag depends
grep 'GET-37' homeserver.log
```

If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).

What do all those fields in the 'Processed' line mean?

@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI

Synapse records several different prometheus metrics for MAU.

`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.

`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.

`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.

`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.

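For example, a Prometheus query reporting how close the server is to its cap can divide the two gauges, and the by-service metric can be narrowed with its label. These PromQL expressions are a hedged sketch built only from the metric names above:

```
synapse_admin_mau_current / synapse_admin_mau_max
synapse_admin_mau_current_mau_by_service{app_service="native"}
```
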
@@ -99,7 +99,7 @@ modules:
    config: {}
```
---
## Server

Define your homeserver name and other base options.

@@ -159,7 +159,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
['listeners'](#listeners) below).

Defaults to `https://<server_name>/`.

@@ -570,7 +570,7 @@ Example configuration:
delete_stale_devices_after: 1y
```

## Homeserver blocking
Useful options for Synapse admins.

---
@@ -922,7 +922,7 @@ retention:
  interval: 1d
```
---
## TLS

Options related to TLS.

@@ -1012,7 +1012,7 @@ federation_custom_ca_list:
  - myCA3.pem
```
---
## Federation

Options related to federation.

@@ -1071,7 +1071,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
## Caching

Options related to caching.

@@ -1185,7 +1185,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.

---
## Database
Config options related to database settings.

---
@@ -1332,20 +1332,21 @@ databases:
    cp_max: 10
```
---
## Logging
Config options related to logging.

---
### `log_config`

This option specifies a yaml python logging config file as described
[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).

Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
## Ratelimiting
Options related to ratelimiting in Synapse.

Each ratelimiting configuration is made of two parameters:
@@ -1576,7 +1577,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
## Media Store
Config options related to Synapse's media store.

---
@@ -1766,7 +1767,7 @@ url_preview_ip_range_blacklist:
  - 'ff00::/8'
  - 'fec0::/10'
```
---
### `url_preview_ip_range_whitelist`

This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1860,7 +1861,7 @@ Example configuration:
  - 'fr;q=0.8'
  - '*;q=0.7'
```
---
### `oembed`

oEmbed allows for easier embedding of content from a website. It can be
@@ -1877,7 +1878,7 @@ oembed:
  - oembed/my_providers.json
```
---
## Captcha

See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.

@@ -1926,7 +1927,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
## TURN
Options related to adding a TURN server to Synapse.

---
@@ -1947,7 +1948,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
---
### `turn_username` and `turn_password`

The username and password if the TURN server needs them and does not use a token.
@@ -2366,7 +2367,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
---
### `refresh_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.
@@ -2422,7 +2423,7 @@ nonrefreshable_access_token_lifetime: 24h
```

---
## Metrics
Config options related to metrics.

---
@@ -2436,31 +2437,6 @@ Example configuration:
enable_metrics: true
```
---
### `sentry`

Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2519,38 +2495,59 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
## API Configuration
Config settings related to the client/server API

---
### `room_prejoin_state`

This setting controls the state that is shared with users upon receiving an
invite to a room, or in reply to a knock on a room. By default, the following
state events are shared with users:

- `m.room.join_rules`
- `m.room.canonical_alias`
- `m.room.avatar`
- `m.room.encryption`
- `m.room.name`
- `m.room.create`
- `m.room.topic`

To change the default behavior, use the following sub-options:
* `disable_default_event_types`: boolean. Set to `true` to disable the above
  defaults. If this is enabled, only the event types listed in
  `additional_event_types` are shared. Defaults to `false`.
* `additional_event_types`: A list of additional state events to include in the
  events to be shared. By default, this list is empty (so only the default event
  types are shared).

  Each entry in this list should be either a single string or a list of two
  strings.
  * A standalone string `t` represents all events with type `t` (i.e.
    with no restrictions on state keys).
  * A pair of strings `[t, s]` represents a single event with type `t` and
    state key `s`. The same type can appear in two entries with different state
    keys: in this situation, both state keys are included in prejoin state.

Example configuration:
```yaml
room_prejoin_state:
  disable_default_event_types: false
  additional_event_types:
    # Share all events of type `org.example.custom.event.typeA`
    - org.example.custom.event.typeA
    # Share only events of type `org.example.custom.event.typeB` whose
    # state_key is "foo"
    - ["org.example.custom.event.typeB", "foo"]
    # Share only events of type `org.example.custom.event.typeC` whose
    # state_key is "bar" or "baz"
    - ["org.example.custom.event.typeC", "bar"]
    - ["org.example.custom.event.typeC", "baz"]
```

*Changed in Synapse 1.74:* admins can filter the events in prejoin state based
on their state key.

---
### `track_puppeted_user_ips`
@@ -2619,7 +2616,7 @@ Example configuration:
form_secret: <PRIVATE STRING>
```
---
## Signing Keys
Config options relating to signing keys

---
@@ -2680,6 +2677,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.

If the use of a trusted key server has to be deactivated, e.g. in a private
federation or for privacy reasons, this can be realised by setting
an empty array (`trusted_key_servers: []`). Then Synapse will request the keys
directly from the server that owns the keys. If Synapse does not get keys directly
from the server, the events of this server will be rejected.

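As noted inline above, the configuration for this case is just the empty list:

```yaml
# Fetch signing keys directly from origin servers; no trusted key server.
trusted_key_servers: []
```
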
Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2728,7 +2731,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
## Single sign-on integration

The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.

@@ -2986,10 +2989,17 @@ Options for each entry include:

For the default provider, the following settings are available:

* `subject_claim`: name of the claim containing a unique identifier
  for the user. Defaults to 'sub', which OpenID Connect
  compliant providers should provide.

* `picture_claim`: name of the claim containing a URL for the user's profile picture.
  Defaults to 'picture', which OpenID Connect compliant providers should provide
  and has to refer to a direct image file such as a PNG, JPEG, or GIF file.

  Currently only supported in monolithic (single-process) server configurations
  where the media repository runs within the Synapse process.

* `localpart_template`: Jinja2 template for the localpart of the MXID.
  If this is not set, the user will be prompted to choose their
  own username (see the documentation for the `sso_auth_account_details.html`
@@ -3014,6 +3024,15 @@ Options for each entry include:
  which is set to the claims returned by the UserInfo Endpoint and/or
  in the ID Token.

* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
  Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
  Defaults to `false`.

* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
  `sub` claim matches the subject claim received during login. This check can be disabled by setting
  this to `true`. Defaults to `false`.

  You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.

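A hedged sketch of these two sub-options in context (the provider entry and the elided fields are placeholders, not a working configuration):

```yaml
oidc_providers:
  - idp_id: my_idp
    # ... issuer, client_id, client_secret, etc. ...
    backchannel_logout_enabled: true
    # Only needed if the mapping provider's subject_claim is not `sub`:
    backchannel_logout_ignore_sub: true
```
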
It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3348,7 +3367,7 @@ email:
  email_validation: "[%(server_name)s] Validate your email"
```
---
## Push
Configuration settings related to push notifications

---
@@ -3357,6 +3376,10 @@ Configuration settings related to push notifications
This setting defines options for push notifications.

This option has a number of sub-options. They are as follows:
* `enabled`: Enables or disables push notification calculation. Note, disabling this will also
  stop unread counts being calculated for rooms. This mode of operation is intended
  for homeservers which may only have bots or appservice users connected, or are otherwise
  not interested in push/unread counters. This is enabled by default.
* `include_content`: Clients requesting push notifications can either have the body of
  the message sent in the notification poke along with other details
  like the sender, or just the event ID and room ID (`event_id_only`).
@@ -3377,11 +3400,12 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
  enabled: true
  include_content: false
  group_unread_count_by_room: false
```
---
## Rooms
Config options relating to rooms.

---
@@ -3627,7 +3651,7 @@ default_power_level_content_override:
```

---
## Opentracing
Configuration options related to Opentracing support.

---
@@ -3670,14 +3694,78 @@ opentracing:
    false
```
---
## Coordinating workers
Configuration options related to workers which belong in the main config file
(usually called `homeserver.yaml`).

A Synapse deployment can scale horizontally by running multiple Synapse processes
called _workers_. Incoming requests are distributed between workers to handle higher
loads. Some workers are privileged and can accept requests from other workers.

As a result, the worker configuration is divided into two parts.

1. The first part (in this section of the manual) defines which shardable tasks
   are delegated to privileged workers. This allows unprivileged workers to make
   requests to a privileged worker to act on their behalf.
1. [The second part](#individual-worker-configuration)
   controls the behaviour of individual workers in isolation.

For guidance on setting up workers, see the [worker documentation](../../workers.md).

---
### `worker_replication_secret`

A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.

By default, this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.

Example configuration:
```yaml
worker_replication_secret: "secret_secret"
```
---
### `start_pushers`

Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).

Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.

Example configuration:
```yaml
start_pushers: false
```
---
### `pusher_instances`

It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `pusher_instances` map. Doing so will remove handling of this function from the main
process. Multiple workers can be added to this map, in which case the work is balanced
across them. Ensure the main process and all pusher workers are restarted after changing
this option.

Example configuration for a single worker:
```yaml
pusher_instances:
  - pusher_worker1
```
And for multiple workers:
```yaml
pusher_instances:
  - pusher_worker1
  - pusher_worker2
```

---
### `send_federation`

Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).

Controls sending of outbound federation transactions on the main process.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.

Example configuration:
```yaml
@@ -3686,24 +3774,37 @@ send_federation: false
|
||||
---
|
||||
### `federation_sender_instances`
|
||||
|
||||
It is possible to run multiple federation sender workers, in which case the
|
||||
work is balanced across them. Use this setting to list the senders.
|
||||
It is possible to scale the processes that handle sending outbound federation requests
|
||||
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
|
||||
a `federation_sender_instances` map. Doing so will remove handling of this function from
|
||||
the main process. Multiple workers can be added to this map, in which case the work is
|
||||
balanced across them.
|
||||
|
||||
This configuration setting must be shared between all federation sender workers, and if
|
||||
changed all federation sender workers must be stopped at the same time and then
|
||||
started, to ensure that all instances are running with the same config (otherwise
|
||||
This configuration setting must be shared between all workers handling federation
|
||||
sending, and if changed all federation sender workers must be stopped at the same time
|
||||
and then started, to ensure that all instances are running with the same config (otherwise
|
||||
events may be dropped).
|
||||
|
||||
Example configuration:
|
||||
Example configuration for a single worker:
|
||||
```yaml
|
||||
federation_sender_instances:
|
||||
- federation_sender1
|
||||
```
|
||||
And for multiple workers:
|
||||
```yaml
|
||||
federation_sender_instances:
|
||||
- federation_sender1
|
||||
- federation_sender2
|
||||
```
|
||||
---
|
||||
### `instance_map`
|
||||
|
||||
When using workers this should be a map from worker name to the
|
||||
When using workers this should be a map from [`worker_name`](#worker_name) to the
|
||||
HTTP replication listener of the worker, if configured.
|
||||
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
|
||||
a HTTP replication listener, and that listener should be included in the `instance_map`.
|
||||
(The main process also needs an HTTP replication listener, but it should not be
|
||||
listed in the `instance_map`.)
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
@@ -3716,8 +3817,11 @@ instance_map:
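
For illustration, a minimal `instance_map` sketch; the worker name, host, and
port here are hypothetical:

```yaml
instance_map:
  worker1:
    host: localhost
    port: 8034
```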
### `stream_writers`

Experimental: When using workers you can define which workers should
handle event persistence and typing notifications. Any worker
specified here must also be in the `instance_map`.
handle writing to streams such as event persistence and typing notifications.
Any worker specified here must also be in the [`instance_map`](#instance_map).

See the list of available streams in the
[worker documentation](../../workers.md#stream-writers).

Example configuration:
```yaml
@@ -3728,29 +3832,18 @@ stream_writers:
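
For illustration, a minimal `stream_writers` sketch; the worker name is
hypothetical, and `events` and `typing` are the streams mentioned above:

```yaml
stream_writers:
  events: worker1   # worker1 must also appear in `instance_map`
  typing: worker1
```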
---

### `run_background_tasks_on`

The worker that is used to run background tasks (e.g. cleaning up expired
data). If not provided this defaults to the main process.
The [worker](../../workers.md#background-tasks) that is used to run
background tasks (e.g. cleaning up expired data). If not provided this
defaults to the main process.

Example configuration:
```yaml
run_background_tasks_on: worker1
```
---

### `worker_replication_secret`

A shared secret used by the replication APIs to authenticate HTTP requests
from workers.

By default this is unused and traffic is not authenticated.

Example configuration:
```yaml
worker_replication_secret: "secret_secret"
```
### `redis`

Configuration for Redis when using workers. This *must* be enabled when
using workers (unless using old style direct TCP configuration).
Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3765,7 +3858,143 @@ redis:
  port: 6379
  password: <secret_password>
```
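
Putting the sub-options together, a minimal sketch; the host and password are
placeholders:

```yaml
redis:
  enabled: true
  host: localhost
  port: 6379
  password: <secret_password>
```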
## Background Updates ##
---

## Individual worker configuration
These options configure an individual worker, in its worker configuration file.
They should not be provided when configuring the main process.

Note also the configuration above for
[coordinating a cluster of workers](#coordinating-workers).

For guidance on setting up workers, see the [worker documentation](../../workers.md).

---

### `worker_app`

The type of worker. The currently available worker applications are listed
in [worker documentation](../../workers.md#available-worker-applications).

The most common worker is the
[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).

Example configuration:
```yaml
worker_app: synapse.app.generic_worker
```
---

### `worker_name`

A unique name for the worker. The worker needs a name so that it can be addressed
by other configuration options and identified in log files. We strongly recommend
giving each worker a unique `worker_name`.

Example configuration:
```yaml
worker_name: generic_worker1
```
---

### `worker_replication_host`

The HTTP replication endpoint that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---

### `worker_replication_http_port`

The HTTP replication port that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---

### `worker_replication_http_tls`

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---

### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.

Example configuration:
```yaml
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```
---

### `worker_daemonize`

Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.

Defaults to `false`.

Example configuration:
```yaml
worker_daemonize: true
```
---

### `worker_pid_file`

When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".

This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.

See also the [`pid_file` option](#pid_file) for the main Synapse process.

Example configuration:
```yaml
worker_pid_file: DATADIR/generic_worker1.pid
```
---

### `worker_log_config`

This option specifies a YAML Python logging config file as described
[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
See also the [`log_config` option](#log_config) for the main Synapse process.

Example configuration:
```yaml
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```
---

## Background Updates
Configuration settings related to background updates.

---

@@ -3794,4 +4023,3 @@ background_updates:
    min_batch_size: 10
    default_batch_size: 50
```


docs/workers.md
@@ -88,10 +88,12 @@ shared configuration file.

### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an "HTTP replication
listener" for the main process; and secondly, you need to enable redis-based
replication. Optionally, a shared secret can be used to authenticate HTTP
traffic between workers. For example:
file suitable for use with workers. First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:

```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -111,27 +113,30 @@ redis:
  enabled: true
```
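
Spelling those three changes out end to end, a minimal sketch of the shared
configuration; the port number and the secret are placeholders:

```yaml
listeners:
  # extend the existing `listeners` section with an HTTP replication listener
  - port: 9093
    bind_addresses: ["127.0.0.1"]
    type: http
    resources:
      - names: [replication]

redis:
  enabled: true

# optional: authenticate HTTP traffic between workers and the main process
worker_replication_secret: "<a long random string>"
```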

See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
See the [configuration manual](usage/configuration/config_documentation.md)
for the full documentation of each option.

Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:

* always unencrypted
* unauthenticated, unless `worker_replication_secret` is configured
* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
  is configured


### Worker configuration

In the config file for each worker, you must specify:
* The type of worker (`worker_app`). The currently available worker applications are listed below.
* A unique name for the worker (`worker_name`).
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
  The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
  (`worker_replication_host` and `worker_replication_http_port`)
* If handling HTTP requests, a `worker_listeners` option with an `http`
  listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners)
  option in the shared config.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`).
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.

For example:
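
A minimal sketch of such a worker file, covering the options listed above; the
worker name and port are illustrative:

```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# The main process's HTTP replication listener
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```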

@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.

Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).


### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either

@@ -187,6 +191,7 @@ information.
    ^/_matrix/federation/(v1|v2)/send_leave/
    ^/_matrix/federation/(v1|v2)/invite/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/timestamp_to_event/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/user/devices/
    ^/_matrix/key/v2/query
@@ -214,10 +219,10 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
    ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
    ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
    ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

    # Encryption requests
    # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
    ^/_matrix/client/(r0|v3|unstable)/keys/query$
    ^/_matrix/client/(r0|v3|unstable)/keys/changes$
    ^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -288,7 +293,8 @@ For multiple workers not handling the SSO endpoints properly, see
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that an [HTTP listener](usage/configuration/config_documentation.md#listeners)
with `client` and `federation` `resources` must be configured in the `worker_listeners`
with `client` and `federation` `resources` must be configured in the
[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
option in the worker config.

#### Load balancing

@@ -300,9 +306,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting a
user ID from the access token or `Authorization` header is currently left as an
exercise for the reader. Admins may additionally wish to separate out `/sync`
requests from a particular user are routed to a single instance. This can
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;` which contains the user's access token.

Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests without it are known as "initial sync" requests, which happen
when a user logs in on a new device and can be *very* resource intensive, so
@@ -331,9 +339,10 @@ of the main process to a particular worker.

To enable this, the worker must have an
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
can handle multiple streams, but unless otherwise documented, each stream can only
have a single writer.
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config. The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
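
A sketch of that shared configuration; the worker name and address are
illustrative:

```yaml
stream_writers:
  events: event_persister1

instance_map:
  event_persister1:
    host: localhost
    port: 8034
```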
@@ -360,9 +369,26 @@ streams and the endpoints associated with them:

##### The `events` stream

The `events` stream experimentally supports having multiple writers, where work
is sharded between them by room ID. Note that you *must* restart all worker
instances when adding or removing event persisters. An example `stream_writers`
The `events` stream experimentally supports having multiple writer workers, where load
is sharded between them by room ID. Each writer is called an _event persister_. They are
responsible for
- receiving new events,
- linking them to those already in the room [DAG](development/room-dag-concepts.md),
- persisting them to the DB, and finally
- updating the events stream.

Because load is sharded in this way, you *must* restart all worker instances when
adding or removing event persisters.

An `event_persister` should not be mistaken for an `event_creator`.
An `event_creator` listens for requests from clients to create new events and does
so. It will then pass those events over HTTP replication to any configured event
persisters (or the main process if none are configured).

Note that `event_creator`s and `event_persister`s are implemented using the same
[`synapse.app.generic_worker`](#synapseappgeneric_worker).

An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:

```yaml
@@ -416,16 +442,18 @@ worker. Background tasks are run periodically or started via replication. Exactl
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). This worker doesn't handle any REST endpoints itself.

To enable this, the worker must have a `worker_name` and can be configured to run
background tasks. For example, to move background tasks to a dedicated worker,
the shared configuration would include:
To enable this, the worker must have a unique
[`worker_name`](usage/configuration/config_documentation.md#worker_name)
and can be configured to run background tasks. For example, to move background tasks
to a dedicated worker, the shared configuration would include:

```yaml
run_background_tasks_on: background_worker
```

You might also wish to investigate the `update_user_directory_from_worker` and
`media_instance_running_background_jobs` settings.
You might also wish to investigate the
[`update_user_directory_from_worker`](#updating-the-user-directory) and
[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.

An example for a dedicated background worker instance:
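
The worker's own file is then an ordinary generic worker configuration; a
minimal sketch, where the name matches the shared config above and the log
path is a placeholder:

```yaml
worker_app: synapse.app.generic_worker
worker_name: background_worker
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
```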

@@ -477,14 +505,21 @@ worker application type.

### `synapse.app.pusher`

It is likely this option will be deprecated in the future and is not recommended for new
installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.

To run multiple instances at once the `pusher_instances` option should list all
pusher instances by their worker name, e.g.:
To run multiple instances at once the
[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
option should list all pusher instances by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:

```yaml
start_pushers: false
pusher_instances:
    - pusher_worker1
    - pusher_worker2
@@ -511,16 +546,24 @@ Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.federation_sender`

It is likely this option will be deprecated in the future and not recommended for
new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
in the shared configuration file to stop the main synapse sending this traffic.

If running multiple federation senders then you must list each
instance in the `federation_sender_instances` option by their `worker_name`.
instance in the
[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
option by their
[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:

```yaml
send_federation: false
federation_sender_instances:
    - federation_sender1
    - federation_sender2
@@ -547,7 +590,9 @@ Handles the media repository. It can handle all endpoints starting with:

    ^/_synapse/admin/v1/quarantine_media/.*$
    ^/_synapse/admin/v1/users/.*/media$

You should also set `enable_media_repo: False` in the shared configuration
You should also set
[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
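
Putting that together, a minimal shared-configuration sketch; the worker name
is hypothetical, and `media_instance_running_background_jobs`, mentioned
earlier, assigns the media background jobs to that worker:

```yaml
enable_media_repo: false
media_instance_running_background_jobs: media_worker
```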
@@ -600,7 +645,9 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
* `synapse.app.pusher`
* `synapse.app.synchrotron`


mypy.ini
@@ -11,6 +11,8 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True
warn_redundant_casts = True

files =
  docker/,
@@ -56,24 +58,8 @@ exclude = (?x)
  |tests/rest/media/v1/test_media_storage.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_metrics.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py
  |tests/util/caches/test_deferred_cache.py
  |tests/util/caches/test_descriptors.py
  |tests/util/caches/test_response_cache.py
  |tests/util/caches/test_ttlcache.py
  |tests/util/test_async_helpers.py
  |tests/util/test_batching_queue.py
  |tests/util/test_dict_cache.py
  |tests/util/test_expiring_cache.py
  |tests/util/test_file_consumer.py
  |tests/util/test_linearizer.py
  |tests/util/test_logcontext.py
  |tests/util/test_lrucache.py
  |tests/util/test_rwlock.py
  |tests/util/test_wheel_timer.py
  )$

[mypy-synapse.federation.transport.client]
@@ -103,33 +89,50 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False

[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True

[mypy-tests.test_server]
disallow_untyped_defs = True

[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.rest.*]
[mypy-tests.config.test_api]
disallow_untyped_defs = True

[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.utils]
[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True

[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.metrics.test_background_process_metrics]
disallow_untyped_defs = True

[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True

[mypy-tests.rest.*]
disallow_untyped_defs = True

[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.*]
disallow_untyped_defs = True

[mypy-tests.test_server]
disallow_untyped_defs = True

[mypy-tests.types.*]
disallow_untyped_defs = True

[mypy-tests.util.caches.*]
disallow_untyped_defs = True

[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False

[mypy-tests.util.*]
disallow_untyped_defs = True

[mypy-tests.utils]
disallow_untyped_defs = True

;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.

poetry.lock (generated): file diff suppressed because it is too large.

pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.70.0rc1"
version = "1.74.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -141,7 +141,8 @@ pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
@@ -192,7 +193,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
authlib = { version = ">=0.14.0", optional = true }
authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -207,6 +208,7 @@ hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
pyicu = { version = ">=2.10.2", optional = true }

[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
@@ -229,6 +231,10 @@ redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
test = ["parameterized", "idna"]
# Allows for better search for international characters in the user directory. This
# requires libicu's development headers installed on the system (e.g. libicu-dev on
# Debian-based distributions).
user-search = ["pyicu"]

# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
@@ -260,6 +266,8 @@ all = [
    "txredisapi", "hiredis",
    # cache-memory
    "pympler",
    # improved user search
    "pyicu",
    # omitted:
    #   - test: it's useful to have this separate from dev deps in the olddeps job
    #   - systemd: this is a system-based requirement
@@ -318,7 +326,7 @@ build-backend = "poetry.core.masonry.api"

[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686"
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -330,3 +338,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"


[tool.cibuildwheel.linux]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"

[tool.cibuildwheel.macos]
# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"

rust/Cargo.toml

@@ -20,14 +20,14 @@ crate-type = ["lib", "cdylib"]
name = "synapse.synapse_rust"

[dependencies]
anyhow = "1.0.66"
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pythonize = "0.17.0"
regex = "1.6.0"
serde = { version = "1.0.147", features = ["derive"] }
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"

[build-dependencies]

@@ -33,10 +33,12 @@ fn bench_match_exact(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -67,10 +69,12 @@ fn bench_match_word(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -101,10 +105,12 @@ fn bench_match_word_miss(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -135,10 +141,12 @@ fn bench_eval_message(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -25,6 +25,7 @@ use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;

@@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
            RelatedEventMatchCondition {
                key: Some(Cow::Borrowed("sender")),
                pattern: None,
                pattern_type: Some(Cow::Borrowed("user_id")),
                rel_type: Cow::Borrowed("m.in_reply_to"),
                include_fallbacks: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
        priority_class: 5,
@@ -257,6 +274,156 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
        priority_class: 1,
@@ -285,6 +452,126 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.encrypted")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.message")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.file")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.image")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.video")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.audio")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
        priority_class: 1,

@@ -23,11 +23,39 @@ use regex::Regex;
|
||||
use super::{
|
||||
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
|
||||
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
|
||||
RelatedEventMatchCondition,
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
/// Used to parse the `is` clause in the room member count condition.
|
||||
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
|
||||
|
||||
/// Used to determine which MSC3931 room version feature flags are actually known to
|
||||
/// the push evaluator.
|
||||
static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
|
||||
RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
|
||||
];
|
||||
|
||||
/// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
|
||||
/// declare Extensible Events support ultimately *disable* push rules which do not declare
|
||||
/// *any* MSC3931 room_version_supports condition).
|
||||
static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
|
||||
"global/override/.m.rule.master".to_string(),
|
||||
"global/override/.m.rule.roomnotif".to_string(),
|
||||
"global/content/.m.rule.contains_user_name".to_string(),
|
||||
];
|
||||
}
|
||||
|
||||
enum RoomVersionFeatures {
|
||||
ExtensibleEvents,
|
||||
}
|
||||
|
||||
impl RoomVersionFeatures {
|
||||
fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows running a set of push rules against a particular event.
|
||||
@@ -49,17 +77,36 @@ pub struct PushRuleEvaluator {
|
||||
/// The power level of the sender of the event, or None if event is an
|
||||
/// outlier.
|
||||
sender_power_level: Option<i64>,
|
||||
|
||||
/// The related events, indexed by relation type. Flattened in the same manner as
|
||||
/// `flattened_keys`.
|
||||
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
|
||||
|
||||
/// If msc3664, push rules for related events, is enabled.
|
||||
related_event_match_enabled: bool,
|
||||
|
||||
/// If MSC3931 is applicable, the feature flags for the room version.
|
||||
room_version_feature_flags: Vec<String>,
|
||||
|
||||
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
|
||||
/// flag as MSC1767 (extensible events core).
|
||||
msc3931_enabled: bool,
|
||||
}
|
||||
|
||||
#[pymethods]
|
||||
impl PushRuleEvaluator {
|
||||
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[new]
|
||||
pub fn py_new(
|
||||
flattened_keys: BTreeMap<String, String>,
|
||||
room_member_count: u64,
|
||||
sender_power_level: Option<i64>,
|
||||
notification_power_levels: BTreeMap<String, i64>,
|
||||
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
|
||||
related_event_match_enabled: bool,
|
||||
room_version_feature_flags: Vec<String>,
|
||||
msc3931_enabled: bool,
|
||||
) -> Result<Self, Error> {
|
||||
let body = flattened_keys
|
||||
.get("content.body")
|
||||
@@ -72,6 +119,10 @@ impl PushRuleEvaluator {
|
||||
room_member_count,
|
||||
notification_power_levels,
|
||||
sender_power_level,
|
||||
related_events_flattened,
|
||||
related_event_match_enabled,
|
||||
room_version_feature_flags,
|
||||
msc3931_enabled,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -94,7 +145,19 @@ impl PushRuleEvaluator {
|
||||
continue;
|
||||
}
|
||||
|
||||
let rule_id = &push_rule.rule_id().to_string();
|
||||
let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
|
||||
let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
|
||||
let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
|
||||
let mut has_rver_condition = false;
|
||||
|
||||
for condition in push_rule.conditions.iter() {
|
||||
has_rver_condition |= matches!(
|
||||
condition,
|
||||
// per MSC3932, we just need *any* room version condition to match
|
||||
Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
|
||||
);
|
||||
|
||||
match self.match_condition(condition, user_id, display_name) {
|
||||
Ok(true) => {}
|
||||
Ok(false) => continue 'outer,
|
||||
@@ -105,6 +168,13 @@ impl PushRuleEvaluator {
|
||||
}
|
||||
}
|
||||
|
||||
// MSC3932: Disable push rules in extensible event-supporting room versions if they
|
||||
// don't describe *any* MSC3931 room version condition, unless the rule is on the
|
||||
// safe list.
|
||||
if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
|
||||
continue;
|
||||
}
|
||||
|
||||
let actions = push_rule
|
||||
.actions
|
||||
.iter()
|
||||
@@ -156,6 +226,9 @@ impl PushRuleEvaluator {
|
||||
KnownCondition::EventMatch(event_match) => {
|
||||
self.match_event_match(event_match, user_id)?
|
||||
}
|
||||
KnownCondition::RelatedEventMatch(event_match) => {
|
||||
self.match_related_event_match(event_match, user_id)?
|
||||
}
|
||||
KnownCondition::ContainsDisplayName => {
|
||||
if let Some(dn) = display_name {
|
||||
if !dn.is_empty() {
|
||||
@@ -189,6 +262,15 @@ impl PushRuleEvaluator {
|
||||
false
|
||||
}
|
||||
}
|
||||
KnownCondition::RoomVersionSupports { feature } => {
|
||||
if !self.msc3931_enabled {
|
||||
false
|
||||
} else {
|
||||
let flag = feature.to_string();
|
||||
KNOWN_RVER_FLAGS.contains(&flag)
|
||||
&& self.room_version_feature_flags.contains(&flag)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
@@ -239,6 +321,79 @@ impl PushRuleEvaluator {
|
||||
compiled_pattern.is_match(haystack)
|
||||
}
|
||||
|
||||
/// Evaluates a `related_event_match` condition. (MSC3664)
|
||||
fn match_related_event_match(
|
||||
&self,
|
||||
event_match: &RelatedEventMatchCondition,
|
||||
user_id: Option<&str>,
|
||||
) -> Result<bool, Error> {
|
||||
// First check if related event matching is enabled...
|
||||
if !self.related_event_match_enabled {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// get the related event, fail if there is none.
|
||||
let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
|
||||
event
|
||||
} else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
// If we are not matching fallbacks, don't match if our special key indicating this is a
|
||||
// fallback relation is not present.
|
||||
if !event_match.include_fallbacks.unwrap_or(false)
|
||||
&& event.contains_key("im.vector.is_falling_back")
|
||||
{
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// if we have no key, accept the event as matching, if it existed without matching any
|
||||
// fields.
|
||||
let key = if let Some(key) = &event_match.key {
|
||||
key
|
||||
} else {
|
||||
return Ok(true);
|
||||
};
|
||||
|
||||
let pattern = if let Some(pattern) = &event_match.pattern {
|
||||
pattern
|
||||
} else if let Some(pattern_type) = &event_match.pattern_type {
|
||||
// The `pattern_type` can either be "user_id" or "user_localpart",
|
||||
// either way if we don't have a `user_id` then the condition can't
|
||||
// match.
|
||||
let user_id = if let Some(user_id) = user_id {
|
||||
user_id
|
||||
} else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
match &**pattern_type {
|
||||
"user_id" => user_id,
|
||||
"user_localpart" => get_localpart_from_id(user_id)?,
|
||||
_ => return Ok(false),
|
||||
}
|
||||
} else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
let haystack = if let Some(haystack) = event.get(&**key) {
|
||||
haystack
|
||||
} else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
// For the content.body we match against "words", but for everything
|
||||
// else we match against the entire value.
|
||||
let match_type = if key == "content.body" {
|
||||
GlobMatchType::Word
|
||||
} else {
|
||||
GlobMatchType::Whole
|
||||
};
|
||||
|
||||
let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
|
||||
compiled_pattern.is_match(haystack)
|
||||
}
|
||||
|
||||
/// Match the member count against an 'is' condition
|
||||
/// The `is` condition can be things like '>2', '==3' or even just '4'.
|
||||
fn match_member_count(&self, is: &str) -> Result<bool, Error> {
|
||||
@@ -267,9 +422,70 @@ impl PushRuleEvaluator {
|
||||
fn push_rule_evaluator() {
|
||||
let mut flattened_keys = BTreeMap::new();
|
||||
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
|
||||
let evaluator =
|
||||
PushRuleEvaluator::py_new(flattened_keys, 10, Some(0), BTreeMap::new()).unwrap();
|
||||
let evaluator = PushRuleEvaluator::py_new(
|
||||
flattened_keys,
|
||||
10,
|
||||
Some(0),
|
||||
BTreeMap::new(),
|
||||
BTreeMap::new(),
|
||||
true,
|
||||
vec![],
|
||||
true,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
|
||||
assert_eq!(result.len(), 3);
|
||||
}

#[test]
fn test_requires_room_version_supports_condition() {
    use std::borrow::Cow;

    use crate::push::{PushRule, PushRules};

    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
    let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
    let evaluator = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        BTreeMap::new(),
        BTreeMap::new(),
        false,
        flags,
        true,
    )
    .unwrap();

    // first test: are the master and contains_user_name rules excluded from the "requires room
    // version condition" check?
    let mut result = evaluator.run(
        &FilteredPushRules::default(),
        Some("@bob:example.org"),
        None,
    );
    assert_eq!(result.len(), 3);

    // second test: if an appropriate push rule is in play, does it get handled?
    let custom_rule = PushRule {
        rule_id: Cow::from("global/underride/.org.example.extensible"),
        priority_class: 1, // underride
        conditions: Cow::from(vec![Condition::Known(
            KnownCondition::RoomVersionSupports {
                feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
            },
        )]),
        actions: Cow::from(vec![Action::Notify]),
        default: false,
        default_enabled: true,
    };
    let rules = PushRules::new(vec![custom_rule]);
    result = evaluator.run(
        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
        None,
        None,
    );
    assert_eq!(result.len(), 1);
}
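On the wire, the custom rule above carries a condition of the following shape, shown here as a Python dict for illustration; the feature string is assumed to match PushRuleRoomFlag.EXTENSIBLE_EVENTS further down this diff:

# Hypothetical JSON form of the MSC3931 condition used by the test rule.
room_version_condition = {
    "kind": "org.matrix.msc3931.room_version_supports",
    "feature": "org.matrix.msc3932.extensible_events",
}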

@@ -267,6 +267,8 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
    EventMatch(EventMatchCondition),
    #[serde(rename = "im.nheko.msc3664.related_event_match")]
    RelatedEventMatch(RelatedEventMatchCondition),
    ContainsDisplayName,
    RoomMemberCount {
        #[serde(skip_serializing_if = "Option::is_none")]
@@ -275,6 +277,10 @@ pub enum KnownCondition {
    SenderNotificationPermission {
        key: Cow<'static, str>,
    },
    #[serde(rename = "org.matrix.msc3931.room_version_supports")]
    RoomVersionSupports {
        feature: Cow<'static, str>,
    },
}

impl IntoPy<PyObject> for Condition {
@@ -299,6 +305,20 @@ pub struct EventMatchCondition {
    pub pattern_type: Option<Cow<'static, str>>,
}

/// The body of a [`Condition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern_type: Option<Cow<'static, str>>,
    pub rel_type: Cow<'static, str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_fallbacks: Option<bool>,
}

/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
@@ -391,15 +411,24 @@ impl PushRules {
pub struct FilteredPushRules {
    push_rules: PushRules,
    enabled_map: BTreeMap<String, bool>,
    msc3664_enabled: bool,
    msc1767_enabled: bool,
}

#[pymethods]
impl FilteredPushRules {
    #[new]
    pub fn py_new(push_rules: PushRules, enabled_map: BTreeMap<String, bool>) -> Self {
    pub fn py_new(
        push_rules: PushRules,
        enabled_map: BTreeMap<String, bool>,
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ) -> Self {
        Self {
            push_rules,
            enabled_map,
            msc3664_enabled,
            msc1767_enabled,
        }
    }

@@ -414,13 +443,29 @@ impl FilteredPushRules {
    /// Iterates over all the rules and their enabled state, including base
    /// rules, in the order they should be executed in.
    fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
        self.push_rules.iter().map(|r| {
            let enabled = *self
                .enabled_map
                .get(&*r.rule_id)
                .unwrap_or(&r.default_enabled);
            (r, enabled)
        })
        self.push_rules
            .iter()
            .filter(|rule| {
                // Ignore disabled experimental push rules
                if !self.msc3664_enabled
                    && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
                {
                    return false;
                }

                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
                    return false;
                }

                true
            })
            .map(|r| {
                let enabled = *self
                    .enabled_map
                    .get(&*r.rule_id)
                    .unwrap_or(&r.default_enabled);
                (r, enabled)
            })
    }
}
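The new filter drops experimental rules whenever their feature flag is off. The same predicate, restated as a minimal Python sketch for clarity (rule IDs are taken verbatim from the Rust code above):

def is_rule_visible(rule_id: str, msc3664_enabled: bool, msc1767_enabled: bool) -> bool:
    # MSC3664 ships a single reply push rule; hide it when the flag is off.
    if not msc3664_enabled and rule_id == "global/override/.im.nheko.msc3664.reply":
        return False
    # MSC1767 (extensible events) rules are namespaced with org.matrix.msc1767.
    if not msc1767_enabled and "org.matrix.msc1767" in rule_id:
        return False
    return True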

@@ -446,6 +491,29 @@ fn test_deserialize_condition() {
    let _: Condition = serde_json::from_str(json).unwrap();
}

#[test]
fn test_deserialize_unstable_msc3664_condition() {
    let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RelatedEventMatch(_))
    ));
}

#[test]
fn test_deserialize_unstable_msc3931_condition() {
    let json =
        r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
    ));
}

#[test]
fn test_deserialize_custom_condition() {
    let json = r#"{"kind":"custom_tag"}"#;

@@ -27,6 +27,7 @@ DISTS = (
    "debian:sid",
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
)

DESC = """\

@@ -53,6 +53,12 @@ Run the complement test suite on Synapse.
  Only build the Docker images. Don't actually run Complement.
  Conflicts with -f/--fast.

  -e, --editable
    Use an editable build of Synapse, rebuilding the image if necessary.
    This is suitable for use in development where a fast turn-around time
    is important.
    Not suitable for use in CI in case the editable environment is impure.

For help on arguments to 'go test', run 'go help testflag'.
EOF
}
@@ -73,6 +79,9 @@ while [ $# -ge 1 ]; do
        "--build-only")
            skip_complement_run=1
            ;;
        "-e"|"--editable")
            use_editable_synapse=1
            ;;
        *)
            # unknown arg: presumably an argument to gotest. break the loop.
            break
@@ -96,25 +105,76 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
    echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi

if [ -n "$use_editable_synapse" ]; then
    if [[ -e synapse/synapse_rust.abi3.so ]]; then
        # In an editable install, back up the host's compiled Rust module to prevent
        # inconvenience; the container will overwrite the module with its own copy.
        mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
        # And restore it on exit:
        synapse_pkg=`realpath synapse`
        trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
    fi

    editable_mount="$(realpath .):/editable-src:z"
    if docker inspect complement-synapse-editable &>/dev/null; then
        # complement-synapse-editable already exists: see if we can still use it:
        # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
        # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

        # First set up the module in the right place for an editable installation.
        docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

        if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
            && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
            skip_docker_build=1
        else
            echo "Editable Synapse image is stale. Will rebuild."
            unset skip_docker_build
        fi
    fi
fi

if [ -z "$skip_docker_build" ]; then
    # Build the base Synapse image from the local checkout
    echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
    docker build -t matrixdotorg/synapse \
        --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
        --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
        -f "docker/Dockerfile" .
    echo_if_github "::endgroup::"
    if [ -n "$use_editable_synapse" ]; then

        # Build the workers docker image (from the base Synapse image we just built).
        echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
        docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
        echo_if_github "::endgroup::"
        # Build a special image designed for use in development with editable
        # installs.
        docker build -t synapse-editable \
            -f "docker/editable.Dockerfile" .

        # Build the unified Complement image (from the worker Synapse image we just built).
        echo_if_github "::group::Build Docker image: complement/Dockerfile"
        docker build -t complement-synapse \
            -f "docker/complement/Dockerfile" "docker/complement"
        echo_if_github "::endgroup::"
        docker build -t synapse-workers-editable \
            --build-arg FROM=synapse-editable \
            -f "docker/Dockerfile-workers" .

        docker build -t complement-synapse-editable \
            --build-arg FROM=synapse-workers-editable \
            -f "docker/complement/Dockerfile" "docker/complement"

        # Prepare the Rust module
        docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

    else

        # Build the base Synapse image from the local checkout
        echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
        docker build -t matrixdotorg/synapse \
            --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
            --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
            -f "docker/Dockerfile" .
        echo_if_github "::endgroup::"

        # Build the workers docker image (from the base Synapse image we just built).
        echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
        docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
        echo_if_github "::endgroup::"

        # Build the unified Complement image (from the worker Synapse image we just built).
        echo_if_github "::group::Build Docker image: complement/Dockerfile"
        docker build -t complement-synapse \
            -f "docker/complement/Dockerfile" "docker/complement"
        echo_if_github "::endgroup::"

    fi
fi

if [ -n "$skip_complement_run" ]; then
@@ -123,10 +183,14 @@ if [ -n "$skip_complement_run" ]; then
fi

export COMPLEMENT_BASE_IMAGE=complement-synapse
if [ -n "$use_editable_synapse" ]; then
    export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
    export COMPLEMENT_HOST_MOUNTS="$editable_mount"
fi

extra_test_args=()

test_tags="synapse_blacklist,msc3787"
test_tags="synapse_blacklist,msc3787,msc3874"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +203,9 @@ if [[ -n "$WORKERS" ]]; then
    # Use workers.
    export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true

    # Pass through the workers defined. If none, it will be an empty string
    export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"

    # Workers can only use Postgres as a database.
    export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres

@@ -159,9 +226,9 @@ else
    # We only test faster room joins on monoliths, because they are purposefully
    # being developed without worker support to start with.
    #
    # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
    # also only pass with monoliths, currently.
    test_tags="$test_tags,faster_joins,msc2716,msc3030"
    # The tests for importing historical messages (MSC2716) also only pass with monoliths,
    # currently.
    test_tags="$test_tags,faster_joins,msc2716"
fi

@@ -46,11 +46,12 @@ import signedjson.key
import signedjson.types
import srvlookup
import yaml
from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool

# uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection
# from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1


@@ -103,6 +104,7 @@ def request(
    destination: str,
    path: str,
    content: Optional[str],
    verify_tls: bool,
) -> requests.Response:
    if method is None:
        if content is None:
@@ -141,7 +143,6 @@ def request(
    s.mount("matrix://", MatrixConnectionAdapter())

    headers: Dict[str, str] = {
        "Host": destination,
        "Authorization": authorization_headers[0],
    }

@@ -152,7 +153,7 @@ def request(
        method=method,
        url=dest,
        headers=headers,
        verify=False,
        verify=verify_tls,
        data=content,
        stream=True,
    )
@@ -202,6 +203,12 @@ def main() -> None:

    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

    parser.add_argument(
        "--insecure",
        action="store_true",
        help="Disable TLS certificate verification",
    )

    parser.add_argument(
        "path", help="request path, including the '/_matrix/federation/...' prefix."
    )
@@ -227,6 +234,7 @@ def main() -> None:
        args.destination,
        args.path,
        content=args.body,
        verify_tls=not args.insecure,
    )

    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:


class MatrixConnectionAdapter(HTTPAdapter):
    @staticmethod
    def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
        if s[-1] == "]":
            # ipv6 literal (with no port)
            return s, 8448
    def send(
        self,
        request: PreparedRequest,
        *args: Any,
        **kwargs: Any,
    ) -> Response:
        # overrides the send() method in the base class.

        if ":" in s:
            out = s.rsplit(":", 1)
        # We need to look for .well-known redirects before passing the request up to
        # HTTPAdapter.send().
        assert isinstance(request.url, str)
        parsed = urlparse.urlsplit(request.url)
        server_name = parsed.netloc
        well_known = self._get_well_known(parsed.netloc)

        if well_known:
            server_name = well_known

        # replace the scheme in the uri with https, so that cert verification is done
        # also replace the hostname if we got a .well-known result
        request.url = urlparse.urlunsplit(
            ("https", server_name, parsed.path, parsed.query, parsed.fragment)
        )

        # at this point we also add the host header (otherwise urllib will add one
        # based on the `host` from the connection returned by `get_connection`,
        # which will be wrong if there is an SRV record).
        request.headers["Host"] = server_name

        return super().send(request, *args, **kwargs)

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        # overrides the get_connection() method in the base class
        parsed = urlparse.urlsplit(url)
        (host, port, ssl_server_name) = self._lookup(parsed.netloc)
        print(
            f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
        )
        return self.poolmanager.connection_from_host(
            host,
            port=port,
            scheme="https",
            pool_kwargs={"server_hostname": ssl_server_name},
        )

    @staticmethod
    def _lookup(server_name: str) -> Tuple[str, int, str]:
        """
        Do an SRV lookup on a server name and return the host:port to connect to
        Given the server_name (after any .well-known lookup), return the host, port and
        the ssl server name
        """
        if server_name[-1] == "]":
            # ipv6 literal (with no port)
            return server_name, 8448, server_name

        if ":" in server_name:
            # explicit port
            out = server_name.rsplit(":", 1)
            try:
                port = int(out[1])
            except ValueError:
                raise ValueError("Invalid host:port '%s'" % s)
                return out[0], port

        # try a .well-known lookup
        if not skip_well_known:
            well_known = MatrixConnectionAdapter.get_well_known(s)
            if well_known:
                return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
                raise ValueError("Invalid host:port '%s'" % (server_name,))
            return out[0], port, out[0]

        try:
            srv = srvlookup.lookup("matrix", "tcp", s)[0]
            return srv.host, srv.port
            srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
            print(
                f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
                file=sys.stderr,
            )
            return srv.host, srv.port, server_name
        except Exception:
            return s, 8448
            return server_name, 8448, server_name

    @staticmethod
    def get_well_known(server_name: str) -> Optional[str]:
        uri = "https://%s/.well-known/matrix/server" % (server_name,)
        print("fetching %s" % (uri,), file=sys.stderr)
    def _get_well_known(server_name: str) -> Optional[str]:
        if ":" in server_name:
            # explicit port, or ipv6 literal. Either way, no .well-known
            return None

        # TODO: check for ipv4 literals

        uri = f"https://{server_name}/.well-known/matrix/server"
        print(f"fetching {uri}", file=sys.stderr)

        try:
            resp = requests.get(uri)
@@ -304,19 +369,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
            print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
        return None

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        parsed = urlparse.urlparse(url)

        (host, port) = self.lookup(parsed.netloc)
        netloc = "%s:%d" % (host, port)
        print("Connecting to %s" % (netloc,), file=sys.stderr)
        url = urlparse.urlunparse(
            ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
        )
        return super().get_connection(url, proxies)


if __name__ == "__main__":
    main()

@@ -27,7 +27,7 @@ import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
from typing import Any, List, Optional, cast
from typing import Any, List, Optional

import attr
import click
@@ -174,9 +174,7 @@ def _prepare() -> None:
        click.get_current_context().abort()

    # Switch to the release branch.
    # Cast safety: parse() won't return a version.LegacyVersion from our
    # version string format.
    parsed_new_version = cast(version.Version, version.parse(new_version))
    parsed_new_version = version.parse(new_version)

    # We assume for debian changelogs that we only do RCs or full releases.
    assert not parsed_new_version.is_devrelease
@@ -219,9 +217,7 @@ def _prepare() -> None:
    update_branch(repo)

    # Create the new release branch
    # Type ignore will no longer be needed after GitPython 3.1.28.
    # See https://github.com/gitpython-developers/GitPython/pull/1419
    repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
    repo.create_head(release_branch_name, commit=base_branch)

    # Special-case SyTest: we don't actually prepare any files so we may
    # as well push it now (and only when we create a release branch;

@@ -1,4 +1,4 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.storage.databases.main.keys import KeyStore

# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
# the races it creates aren't too bad.

SlavedKeyStore = KeyStore

# Stub for PyICU.

class Locale:
    @staticmethod
    def getDefault() -> Locale: ...

class BreakIterator:
    @staticmethod
    def createWordInstance(locale: Locale) -> BreakIterator: ...
    def setText(self, text: str) -> None: ...
    def nextBoundary(self) -> int: ...
@@ -1,4 +1,4 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union

from synapse.types import JsonDict

@@ -25,7 +25,13 @@ class PushRules:
    def rules(self) -> Collection[PushRule]: ...

class FilteredPushRules:
    def __init__(self, push_rules: PushRules, enabled_map: Dict[str, bool]): ...
    def __init__(
        self,
        push_rules: PushRules,
        enabled_map: Dict[str, bool],
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

def get_base_rule_ids() -> Collection[str]: ...
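A hedged construction example matching the widened stub signature. The PushRules([]) constructor argument is an assumption, since its __init__ is not shown in this hunk:

from synapse.synapse_rust.push import FilteredPushRules, PushRules

filtered = FilteredPushRules(
    PushRules([]),  # assumed: a sequence of PushRule objects
    {},             # enabled_map: per-rule enable/disable overrides
    True,           # msc3664_enabled
    False,          # msc1767_enabled
)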
@@ -37,10 +43,14 @@ class PushRuleEvaluator:
        room_member_count: int,
        sender_power_level: Optional[int],
        notification_power_levels: Mapping[str, int],
        related_events_flattened: Mapping[str, Mapping[str, str]],
        related_event_match_enabled: bool,
        room_version_feature_flags: Tuple[str, ...],
        msc3931_enabled: bool,
    ): ...
    def run(
        self,
        push_rules: FilteredPushRules,
        user_id: Optional[str],
        display_name: Optional[str],
    ) -> Collection[dict]: ...
    ) -> Collection[Union[Mapping, str]]: ...

@@ -222,6 +222,7 @@ def main() -> None:

    args = parser.parse_args()

    config: Optional[Dict[str, Any]] = None
    if "config" in args and args.config:
        config = yaml.safe_load(args.config)

@@ -229,7 +230,7 @@ def main() -> None:
        secret = args.shared_secret
    else:
        # argparse should check that we have either config or shared secret
        assert config
        assert config is not None

        secret = config.get("registration_shared_secret")
        secret_file = config.get("registration_shared_secret_path")
@@ -244,7 +245,7 @@ def main() -> None:

    if args.server_url:
        server_url = args.server_url
    elif config:
    elif config is not None:
        server_url = _find_client_listener(config)
        if not server_url:
            server_url = _DEFAULT_SERVER_URL

@@ -125,6 +125,8 @@ class EventTypes:
    MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
    MSC2716_MARKER: Final = "org.matrix.msc2716.marker"

    Reaction: Final = "m.reaction"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"
@@ -150,6 +152,7 @@ class EduTypes:

class RejectedReason:
    AUTH_ERROR: Final = "auth_error"
    OVERSIZED_EVENT: Final = "oversized_event"


class RoomCreationPreset:
@@ -228,6 +231,9 @@ class EventContentFields:
    # The authorising user for joining a restricted room.
    AUTHORISING_USER: Final = "join_authorised_via_users_server"

    # an unspecced field added to to-device messages to identify them uniquely-ish
    TO_DEVICE_MSGID: Final = "org.matrix.msgid"


class RoomTypes:
    """Understood values of the room_type field of m.room.create events."""

@@ -300,10 +300,8 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError):
    """An error indicating we don't understand the request you're trying to make"""

    def __init__(
        self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
    ):
        super().__init__(400, msg, errcode)
    def __init__(self, msg: str = "Unrecognized request", code: int = 400):
        super().__init__(code, msg, Codes.UNRECOGNIZED)
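With the new signature the HTTP status becomes caller-selectable while the Matrix errcode stays M_UNRECOGNIZED; a hypothetical use (the 404 value is illustrative):

# e.g. an endpoint that exists but has no handler for this path
raise UnrecognizedRequestError("Unrecognized request", code=404)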


class NotFoundError(SynapseError):
@@ -426,8 +424,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

    def __init__(self, msg: str):
    def __init__(self, msg: str, unpersistable: bool):
        """
        unpersistable:
            if True, the PDU must not be persisted, not even as a rejected PDU
            when received over federation.
            This is notably true when the entire PDU exceeds the size limit for a PDU,
            (as opposed to an individual key's size limit being exceeded).
        """

        super().__init__(413, msg, Codes.TOO_LARGE)
        self.unpersistable = unpersistable
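A hedged usage sketch of the new flag (the message text is illustrative):

# A PDU whose total size is over the limit must not be persisted at all,
# not even as a rejected event.
raise EventSizeError("event exceeds maximum PDU size", unpersistable=True)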


class LoginError(SynapseError):
@@ -713,7 +720,7 @@ class HttpResponseException(CodeMessageException):
        set to the reason code from the HTTP response.

        Returns:
            SynapseError:
            The error converted to a SynapseError.
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text

@@ -43,7 +43,7 @@ if TYPE_CHECKING:
    from synapse.server import HomeServer

FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "limit": {"type": "number"},
@@ -63,7 +63,7 @@ FILTER_SCHEMA = {
}

ROOM_FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -77,7 +77,7 @@ ROOM_FILTER_SCHEMA = {
}

ROOM_EVENT_FILTER_SCHEMA = {
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
    "type": "object",
    "properties": {
        "limit": {"type": "number"},
@@ -143,7 +143,7 @@ USER_FILTER_SCHEMA = {
            },
        },
    },
    "additionalProperties": False,
    "additionalProperties": True,  # Allow new fields for forward compatibility
}
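Synapse validates client filters against these schemas with jsonschema, so flipping additionalProperties to True makes unknown fields validate instead of erroring. A minimal sketch, using a local schema of the same shape (the example field name is made up):

import jsonschema

schema = {
    "type": "object",
    "additionalProperties": True,  # unknown fields are now tolerated
    "properties": {"limit": {"type": "number"}},
}
jsonschema.validate({"limit": 10, "org.example.new_field": "kept"}, schema)  # no ValidationError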


@@ -343,6 +343,7 @@ class RequestRatelimiter:
        requester: Requester,
        update: bool = True,
        is_admin_redaction: bool = False,
        n_actions: int = 1,
    ) -> None:
        """Ratelimits requests.

@@ -355,6 +356,8 @@ class RequestRatelimiter:
            is_admin_redaction: Whether this is a room admin/moderator
                redacting an event. If so then we may apply different
                ratelimits depending on config.
            n_actions: Multiplier for the number of actions to apply to the
                rate limiter at once.

        Raises:
            LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
        if is_admin_redaction and self.admin_redaction_ratelimiter:
            # If we have separate config for admin redactions, use a separate
            # ratelimiter as to not have user_ids clash
            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
            await self.admin_redaction_ratelimiter.ratelimit(
                requester, update=update, n_actions=n_actions
            )
        else:
            # Override rate and burst count per-user
            await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
                rate_hz=messages_per_second,
                burst_count=burst_count,
                update=update,
                n_actions=n_actions,
            )
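A hedged example of the new multiplier: a handler sending a batch can charge the limiter once for all of its events instead of once per event (the function and its parameters are hypothetical):

async def send_batch(ratelimiter, requester, events) -> None:
    # Charge the limiter for the whole batch in one call.
    await ratelimiter.ratelimit(requester, n_actions=len(events))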


@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Dict, Optional
from typing import Callable, Dict, Optional, Tuple

import attr

@@ -51,6 +51,13 @@ class RoomDisposition:
    UNSTABLE = "unstable"


class PushRuleRoomFlag:
    """Enum for listing possible MSC3931 room version feature flags, for push rules"""

    # MSC3932: Room version supports MSC1767 Extensible Events.
    EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion:
    """An object which describes the unique attributes of a room version."""
@@ -91,6 +98,12 @@ class RoomVersion:
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag


class RoomVersions:
@@ -111,6 +124,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V2 = RoomVersion(
        "2",
@@ -129,6 +143,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V3 = RoomVersion(
        "3",
@@ -147,6 +162,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V4 = RoomVersion(
        "4",
@@ -165,6 +181,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V5 = RoomVersion(
        "5",
@@ -183,6 +200,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V6 = RoomVersion(
        "6",
@@ -201,6 +219,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -219,6 +238,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V7 = RoomVersion(
        "7",
@@ -237,6 +257,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V8 = RoomVersion(
        "8",
@@ -255,6 +276,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V9 = RoomVersion(
        "9",
@@ -273,6 +295,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
@@ -291,6 +314,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    V10 = RoomVersion(
        "10",
@@ -309,6 +333,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=(),
    )
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
@@ -327,6 +352,27 @@ class RoomVersions:
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
        "org.matrix.msc1767.10",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
    )
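A short sketch of how the flag tuple would be consulted (names are all from this diff); only the experimental MSC1767v10 version advertises the extensible-events flag, while stable versions advertise nothing:

# MSC1767v10 advertises extensible-events support; V10 does not.
assert PushRuleRoomFlag.EXTENSIBLE_EVENTS in RoomVersions.MSC1767v10.msc3931_push_features
assert RoomVersions.V10.msc3931_push_features == ()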


@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
from twisted.web.resource import Resource

import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -264,26 +266,18 @@ def register_start(
    reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))


def listen_metrics(
    bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
    """
    Start Prometheus metrics server.
    """
    from prometheus_client import start_http_server as start_http_server_prometheus

    from synapse.metrics import (
        RegistryProxy,
        start_http_server as start_http_server_legacy,
    )
    from synapse.metrics import RegistryProxy

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        if enable_legacy_metric_names:
            start_http_server_legacy(port, addr=host, registry=RegistryProxy)
        else:
            _set_prometheus_client_use_created_metrics(False)
            start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
        _set_prometheus_client_use_created_metrics(False)
        start_http_server_prometheus(port, addr=host, registry=RegistryProxy)


def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
@@ -357,6 +351,55 @@ def listen_tcp(
    return r  # type: ignore[return-value]


def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
    max_request_body_size: int,
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls

    assert listener_config.http_options is not None

    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )
    if tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            context_factory,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d", port)
    return ports


def listen_ssl(
    bind_addresses: Collection[str],
    port: int,

@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
    ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")


class AdminCmdSlavedStore(
    SlavedFilteringStore,
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedDeviceStore,
    FilteringWorkerStore,
    DeviceWorkerStore,
    TagsWorkerStore,
    DeviceInboxWorkerStore,
    AccountDataWorkerStore,
    PushRulesWorkerStore,
    ApplicationServiceTransactionWorkerStore,
    ApplicationServiceWorkerStore,
    RegistrationWorkerStore,
    RoomMemberWorkerStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,
    ReceiptsWorkerStore,
    StreamWorkerStore,
    EventsWorkerStore,
    RegistrationWorkerStore,
    RoomWorkerStore,
):
    def __init__(

@@ -55,13 +55,13 @@ import os
import signal
import sys
from types import FrameType
from typing import Any, Callable, List, Optional
from typing import Any, Callable, Dict, List, Optional

from twisted.internet.main import installReactor

# a list of the original signal handlers, before we installed our custom ones.
# We restore these in our child processes.
_original_signal_handlers: dict[int, Any] = {}
_original_signal_handlers: Dict[int, Any] = {}


class ProxiedReactor:

@@ -14,14 +14,12 @@
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import sys
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
from typing import Dict, List
|
||||
|
||||
from twisted.internet import address
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
import synapse
|
||||
import synapse.events
|
||||
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
|
||||
from synapse.api.urls import (
|
||||
CLIENT_API_PREFIX,
|
||||
FEDERATION_PREFIX,
|
||||
@@ -43,51 +41,11 @@ from synapse.config.logger import setup_logging
|
||||
from synapse.config.server import ListenerConfig
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.http.server import JsonResource, OptionsResource
|
||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||
from synapse.http.site import SynapseRequest, SynapseSite
|
||||
from synapse.logging.context import LoggingContext
|
||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.client import (
|
||||
account_data,
|
||||
events,
|
||||
initial_sync,
|
||||
login,
|
||||
presence,
|
||||
profile,
|
||||
push_rule,
|
||||
read_marker,
|
||||
receipts,
|
||||
relations,
|
||||
room,
|
||||
room_batch,
|
||||
room_keys,
|
||||
sendtodevice,
|
||||
sync,
|
||||
tags,
|
||||
user_directory,
|
||||
versions,
|
||||
voip,
|
||||
)
|
||||
from synapse.rest.client._base import client_patterns
|
||||
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
|
||||
from synapse.rest.client.devices import DevicesRestServlet
|
||||
from synapse.rest.client.keys import (
|
||||
KeyChangesServlet,
|
||||
KeyQueryServlet,
|
||||
OneTimeKeyServlet,
|
||||
)
|
||||
from synapse.rest.client.register import (
|
||||
RegisterRestServlet,
|
||||
RegistrationTokenValidityRestServlet,
|
||||
)
|
||||
from synapse.rest.health import HealthResource
|
||||
from synapse.rest.key.v2 import KeyResource
|
||||
from synapse.rest.synapse.client import build_synapse_client_resource_tree
|
||||
@@ -101,8 +59,16 @@ from synapse.storage.databases.main.appservice import (
|
||||
from synapse.storage.databases.main.censor_events import CensorEventsStore
|
||||
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
|
||||
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
|
||||
from synapse.storage.databases.main.devices import DeviceWorkerStore
|
||||
from synapse.storage.databases.main.directory import DirectoryWorkerStore
|
||||
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
|
||||
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
|
||||
from synapse.storage.databases.main.event_push_actions import (
|
||||
EventPushActionsWorkerStore,
|
||||
)
|
||||
from synapse.storage.databases.main.events_worker import EventsWorkerStore
|
||||
from synapse.storage.databases.main.filtering import FilteringWorkerStore
|
||||
from synapse.storage.databases.main.keys import KeyStore
|
||||
from synapse.storage.databases.main.lock import LockStore
|
||||
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
|
||||
from synapse.storage.databases.main.metrics import ServerMetricsStore
|
||||
@@ -111,118 +77,31 @@ from synapse.storage.databases.main.monthly_active_users import (
|
||||
)
|
||||
from synapse.storage.databases.main.presence import PresenceStore
|
||||
from synapse.storage.databases.main.profile import ProfileWorkerStore
|
||||
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
|
||||
from synapse.storage.databases.main.pusher import PusherWorkerStore
|
||||
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
|
||||
from synapse.storage.databases.main.registration import RegistrationWorkerStore
|
||||
from synapse.storage.databases.main.relations import RelationsWorkerStore
|
||||
from synapse.storage.databases.main.room import RoomWorkerStore
|
||||
from synapse.storage.databases.main.room_batch import RoomBatchStore
|
||||
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
|
||||
from synapse.storage.databases.main.search import SearchStore
|
||||
from synapse.storage.databases.main.session import SessionStore
|
||||
from synapse.storage.databases.main.signatures import SignatureWorkerStore
|
||||
from synapse.storage.databases.main.state import StateGroupWorkerStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.stream import StreamWorkerStore
|
||||
from synapse.storage.databases.main.tags import TagsWorkerStore
|
||||
from synapse.storage.databases.main.transactions import TransactionWorkerStore
|
||||
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
|
||||
from synapse.storage.databases.main.user_directory import UserDirectoryStore
|
||||
from synapse.types import JsonDict
|
||||
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
|
||||
from synapse.util import SYNAPSE_VERSION
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
|
||||
logger = logging.getLogger("synapse.app.generic_worker")
|
||||
|
||||
|
||||
class KeyUploadServlet(RestServlet):
|
||||
"""An implementation of the `KeyUploadServlet` that responds to read only
|
||||
requests, but otherwise proxies through to the master instance.
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
|
||||
|
||||
def __init__(self, hs: HomeServer):
|
||||
"""
|
||||
Args:
|
||||
hs: server
|
||||
"""
|
||||
super().__init__()
|
||||
self.auth = hs.get_auth()
|
||||
self.store = hs.get_datastores().main
|
||||
self.http_client = hs.get_simple_http_client()
|
||||
self.main_uri = hs.config.worker.worker_main_http_uri
|
||||
|
||||
async def on_POST(
|
||||
self, request: SynapseRequest, device_id: Optional[str]
|
||||
) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
user_id = requester.user.to_string()
|
||||
body = parse_json_object_from_request(request)
|
||||
|
||||
if device_id is not None:
|
||||
# passing the device_id here is deprecated; however, we allow it
|
||||
# for now for compatibility with older clients.
|
||||
if requester.device_id is not None and device_id != requester.device_id:
|
||||
logger.warning(
|
||||
"Client uploading keys for a different device "
|
||||
"(logged in as %s, uploading for %s)",
|
||||
requester.device_id,
|
||||
device_id,
|
||||
)
|
||||
else:
|
||||
device_id = requester.device_id
|
||||
|
||||
if device_id is None:
|
||||
raise SynapseError(
|
||||
400, "To upload keys, you must pass device_id when authenticating"
|
||||
)
|
||||
|
||||
if body:
|
||||
# They're actually trying to upload something, proxy to main synapse.
|
||||
|
||||
# Proxy headers from the original request, such as the auth headers
|
||||
# (in case the access token is there) and the original IP /
|
||||
# User-Agent of the request.
|
||||
headers = {
|
||||
header: request.requestHeaders.getRawHeaders(header, [])
|
||||
for header in (b"Authorization", b"User-Agent")
|
||||
}
|
||||
# Add the previous hop to the X-Forwarded-For header.
|
||||
x_forwarded_for = request.requestHeaders.getRawHeaders(
|
||||
b"X-Forwarded-For", []
|
||||
)
|
||||
# we use request.client here, since we want the previous hop, not the
|
||||
# original client (as returned by request.getClientAddress()).
|
||||
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
|
||||
previous_host = request.client.host.encode("ascii")
|
||||
# If the header exists, add to the comma-separated list of the first
|
||||
# instance of the header. Otherwise, generate a new header.
|
||||
if x_forwarded_for:
|
||||
x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
|
||||
x_forwarded_for.extend(x_forwarded_for[1:])
|
||||
else:
|
||||
x_forwarded_for = [previous_host]
|
||||
headers[b"X-Forwarded-For"] = x_forwarded_for
|
||||
|
||||
# Replicate the original X-Forwarded-Proto header. Note that
|
||||
# XForwardedForRequest overrides isSecure() to give us the original protocol
|
||||
# used by the client, as opposed to the protocol used by our upstream proxy
|
||||
# - which is what we want here.
|
||||
headers[b"X-Forwarded-Proto"] = [
|
||||
b"https" if request.isSecure() else b"http"
|
||||
]
|
||||
|
||||
try:
|
||||
result = await self.http_client.post_json_get_json(
|
||||
self.main_uri + request.uri.decode("ascii"), body, headers=headers
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
raise e.to_synapse_error() from e
|
||||
except RequestSendFailed as e:
|
||||
raise SynapseError(502, "Failed to talk to master") from e
|
||||
|
||||
return 200, result
|
||||
else:
|
||||
# Just interested in counts.
|
||||
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
return 200, {"one_time_key_counts": result}
|
||||
|
||||
|
||||
class GenericWorkerSlavedStore(
|
||||
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
|
||||
# rather than going via the correct worker.
|
||||
@@ -232,26 +111,36 @@ class GenericWorkerSlavedStore(
|
||||
EndToEndRoomKeyStore,
|
||||
PresenceStore,
|
||||
DeviceInboxWorkerStore,
|
||||
SlavedDeviceStore,
|
||||
SlavedPushRuleStore,
|
||||
DeviceWorkerStore,
|
||||
TagsWorkerStore,
|
||||
AccountDataWorkerStore,
|
||||
SlavedPusherStore,
|
||||
CensorEventsStore,
|
||||
ClientIpWorkerStore,
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
|
||||
# the races it creates aren't too bad.
|
||||
KeyStore,
|
||||
RoomWorkerStore,
|
||||
RoomBatchStore,
|
||||
DirectoryWorkerStore,
|
||||
PushRulesWorkerStore,
|
||||
ApplicationServiceTransactionWorkerStore,
|
||||
ApplicationServiceWorkerStore,
|
||||
ProfileWorkerStore,
|
||||
SlavedFilteringStore,
|
||||
FilteringWorkerStore,
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
MediaRepositoryStore,
|
||||
ServerMetricsStore,
|
||||
PusherWorkerStore,
|
||||
RoomMemberWorkerStore,
|
||||
RelationsWorkerStore,
|
||||
EventFederationWorkerStore,
|
||||
EventPushActionsWorkerStore,
|
||||
StateGroupWorkerStore,
|
||||
SignatureWorkerStore,
|
||||
UserErasureWorkerStore,
|
||||
ReceiptsWorkerStore,
|
||||
synapse/app/generic_worker.py

     StreamWorkerStore,
     EventsWorkerStore,
     RegistrationWorkerStore,
     SearchStore,
     TransactionWorkerStore,

@@ -268,15 +157,9 @@ class GenericWorkerServer(HomeServer):
     DATASTORE_CLASS = GenericWorkerSlavedStore  # type: ignore

     def _listen_http(self, listener_config: ListenerConfig) -> None:
-        port = listener_config.port
-        bind_addresses = listener_config.bind_addresses
-
         assert listener_config.http_options is not None

-        site_tag = listener_config.http_options.tag
-        if site_tag is None:
-            site_tag = str(port)
-
         # We always include a health resource.
         resources: Dict[str, Resource] = {"/health": HealthResource()}

@@ -285,45 +168,7 @@ class GenericWorkerServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                 elif name == "client":
-                    resource = JsonResource(self, canonical_json=False)
-
-                    RegisterRestServlet(self).register(resource)
-                    RegistrationTokenValidityRestServlet(self).register(resource)
-                    login.register_servlets(self, resource)
-                    ThreepidRestServlet(self).register(resource)
-                    WhoamiRestServlet(self).register(resource)
-                    DevicesRestServlet(self).register(resource)
-
-                    # Read-only
-                    KeyUploadServlet(self).register(resource)
-                    KeyQueryServlet(self).register(resource)
-                    KeyChangesServlet(self).register(resource)
-                    OneTimeKeyServlet(self).register(resource)
-
-                    voip.register_servlets(self, resource)
-                    push_rule.register_servlets(self, resource)
-                    versions.register_servlets(self, resource)
-
-                    profile.register_servlets(self, resource)
-
-                    sync.register_servlets(self, resource)
-                    events.register_servlets(self, resource)
-                    room.register_servlets(self, resource, is_worker=True)
-                    relations.register_servlets(self, resource)
-                    room.register_deprecated_servlets(self, resource)
-                    initial_sync.register_servlets(self, resource)
-                    room_batch.register_servlets(self, resource)
-                    room_keys.register_servlets(self, resource)
-                    tags.register_servlets(self, resource)
-                    account_data.register_servlets(self, resource)
-                    receipts.register_servlets(self, resource)
-                    read_marker.register_servlets(self, resource)
-
-                    sendtodevice.register_servlets(self, resource)
-
-                    user_directory.register_servlets(self, resource)
-
-                    presence.register_servlets(self, resource)
+                    resource: Resource = ClientRestResource(self)

                     resources[CLIENT_API_PREFIX] = resource

@@ -375,23 +220,15 @@ class GenericWorkerServer(HomeServer):

         root_resource = create_resource_tree(resources, OptionsResource())

-        _base.listen_tcp(
-            bind_addresses,
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-                self.version_string,
-                max_request_body_size=max_request_body_size(self.config),
-                reactor=self.get_reactor(),
-            ),
+        _base.listen_http(
+            listener_config,
+            root_resource,
+            self.version_string,
+            max_request_body_size(self.config),
+            self.tls_server_context_factory,
+            reactor=self.get_reactor(),
         )

-        logger.info("Synapse worker now listening on port %d", port)
-
     def start_listening(self) -> None:
         for listener in self.config.worker.worker_listeners:
             if listener.type == "http":

@@ -413,7 +250,6 @@ class GenericWorkerServer(HomeServer):
                 _base.listen_metrics(
                     listener.bind_addresses,
                     listener.port,
-                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                 )
             else:
                 logger.warning("Unsupported listener type: %s", listener.type)
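Note on the hunks above: the refactor replaces the hand-rolled `SynapseSite` + `listen_tcp` wiring (and, in `homeserver.py` below, the TLS branch as well) with a single shared `_base.listen_http` helper. The diff only shows the call sites, so the following is a minimal sketch of what such a helper has to do, with the signature inferred from those call sites; the real implementation lives in `synapse/app/_base.py` and may differ in detail.

```python
import logging

from twisted.web.resource import Resource

from synapse.app._base import listen_ssl, listen_tcp
from synapse.config.server import ListenerConfig
from synapse.http.site import SynapseSite

logger = logging.getLogger(__name__)


def listen_http_sketch(
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
    max_request_body_size: int,
    context_factory,  # TLS context factory; only needed when the listener uses TLS
    reactor=None,
):
    port = listener_config.port
    site_tag = listener_config.http_options.tag or str(port)

    # Build the site once, shared by the TLS and plain-TCP cases.
    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if listener_config.tls else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )

    if listener_config.tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            listener_config.bind_addresses, port, site, context_factory, reactor=reactor
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(listener_config.bind_addresses, port, site, reactor=reactor)
        logger.info("Synapse now listening on TCP port %d", port)

    return ports
```

Folding the TLS/plain split into one helper is also what lets the worker entrypoint above pass `self.tls_server_context_factory` at all, instead of duplicating the branch from `homeserver.py`.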
synapse/app/homeserver.py

@@ -37,8 +37,7 @@ from synapse.api.urls import (
 from synapse.app import _base
 from synapse.app._base import (
     handle_startup_exception,
-    listen_ssl,
-    listen_tcp,
+    listen_http,
     max_request_body_size,
     redirect_stdio_to_logs,
     register_start,

@@ -53,7 +52,6 @@ from synapse.http.server import (
     RootOptionsRedirectResource,
     StaticResource,
 )
-from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource

@@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer):
         self, config: HomeServerConfig, listener_config: ListenerConfig
     ) -> Iterable[Port]:
-        port = listener_config.port
-        bind_addresses = listener_config.bind_addresses
-        tls = listener_config.tls
+        # Must exist since this is an HTTP listener.
         assert listener_config.http_options is not None
         site_tag = listener_config.http_options.tag

@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
         else:
             root_resource = OptionsResource()

-        site = SynapseSite(
-            "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
-            site_tag,
+        ports = listen_http(
             listener_config,
             create_resource_tree(resources, root_resource),
             self.version_string,
-            max_request_body_size=max_request_body_size(self.config),
+            max_request_body_size(self.config),
+            self.tls_server_context_factory,
             reactor=self.get_reactor(),
         )

-        if tls:
-            # refresh_certificate should have been called before this.
-            assert self.tls_server_context_factory is not None
-            ports = listen_ssl(
-                bind_addresses,
-                port,
-                site,
-                self.tls_server_context_factory,
-                reactor=self.get_reactor(),
-            )
-            logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
-        else:
-            ports = listen_tcp(
-                bind_addresses,
-                port,
-                site,
-                reactor=self.get_reactor(),
-            )
-            logger.info("Synapse now listening on TCP port %d", port)
-
         return ports

     def _configure_named_resource(

@@ -291,7 +265,6 @@ class SynapseHomeServer(HomeServer):
                 _base.listen_metrics(
                     listener.bind_addresses,
                     listener.port,
-                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                 )
             else:
                 # this shouldn't happen, as the listener type should have been checked
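One detail in the `_listener_http` hunk above deserves a remark: `ListenerConfig.http_options` is optional at the type level, and the new `# Must exist since this is an HTTP listener.` comment documents that the `assert` both encodes that invariant and narrows the `Optional` for mypy. A self-contained sketch of the same pattern (all names here are illustrative, not Synapse's):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class HttpOptions:
    tag: Optional[str]


@dataclass
class Listener:
    port: int
    type: str
    http_options: Optional[HttpOptions]  # only populated when type == "http"


def site_tag_for(listener: Listener) -> str:
    # The assert documents the invariant ("an HTTP listener always has HTTP
    # options") and narrows Optional[HttpOptions] to HttpOptions for mypy.
    assert listener.http_options is not None
    return listener.http_options.tag or str(listener.port)


print(site_tag_for(Listener(8008, "http", HttpOptions(tag=None))))  # -> 8008
```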
synapse/appservice/__init__.py

@@ -32,9 +32,9 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

-# Type for the `device_one_time_key_counts` field in an appservice transaction
+# Type for the `device_one_time_keys_count` field in an appservice transaction
 #   user ID -> {device ID -> {algorithm -> count}}
-TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
+TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]

 # Type for the `device_unused_fallback_key_types` field in an appservice transaction
 #   user ID -> {device ID -> [algorithm]}

@@ -172,12 +172,24 @@ class ApplicationService:
         Returns:
             True if this service would like to know about this room.
         """
-        member_list = await store.get_users_in_room(
+        # We can use `get_local_users_in_room(...)` here because an application service
+        # can only be interested in local users of the server it's on (ignore any remote
+        # users that might match the user namespace regex).
+        #
+        # In the future, we can consider re-using
+        # `store.get_app_service_users_in_room` which is very similar to this
+        # function but has a slightly worse performance than this because we
+        # have an early escape-hatch if we find a single user that the
+        # appservice is interested in. The juice would be worth the squeeze if
+        # `store.get_app_service_users_in_room` was used in more places besides
+        # an experimental MSC. But for now we can avoid doing more work and
+        # barely using it later.
+        local_user_ids = await store.get_local_users_in_room(
             room_id, on_invalidate=cache_context.invalidate
         )

         # check joined member events
-        for user_id in member_list:
+        for user_id in local_user_ids:
             if self.is_interested_in_user(user_id):
                 return True
         return False

@@ -233,7 +245,9 @@ class ApplicationService:
             return True

         # likewise with the room's aliases (if it has any)
-        alias_list = await store.get_aliases_for_room(room_id)
+        alias_list = await store.get_aliases_for_room(
+            room_id, on_invalidate=cache_context.invalidate
+        )
         for alias in alias_list:
             if self.is_room_alias_in_namespace(alias):
                 return True

@@ -299,7 +313,9 @@ class ApplicationService:
         # Find all the rooms the sender is in
         if self.is_interested_in_user(user_id.to_string()):
             return True
-        room_ids = await store.get_rooms_for_user(user_id.to_string())
+        room_ids = await store.get_rooms_for_user(
+            user_id.to_string(), on_invalidate=cache_context.invalidate
+        )

         # Then find out if the appservice is interested in any of those rooms
         for room_id in room_ids:

@@ -364,7 +380,7 @@ class AppServiceTransaction:
         events: List[EventBase],
         ephemeral: List[JsonDict],
         to_device_messages: List[JsonDict],
-        one_time_key_counts: TransactionOneTimeKeyCounts,
+        one_time_keys_count: TransactionOneTimeKeysCount,
         unused_fallback_keys: TransactionUnusedFallbackKeys,
         device_list_summary: DeviceListUpdates,
     ):

@@ -373,7 +389,7 @@ class AppServiceTransaction:
         self.events = events
         self.ephemeral = ephemeral
         self.to_device_messages = to_device_messages
-        self.one_time_key_counts = one_time_key_counts
+        self.one_time_keys_count = one_time_keys_count
         self.unused_fallback_keys = unused_fallback_keys
         self.device_list_summary = device_list_summary

@@ -390,7 +406,7 @@ class AppServiceTransaction:
             events=self.events,
             ephemeral=self.ephemeral,
             to_device_messages=self.to_device_messages,
-            one_time_key_counts=self.one_time_key_counts,
+            one_time_keys_count=self.one_time_keys_count,
             unused_fallback_keys=self.unused_fallback_keys,
             device_list_summary=self.device_list_summary,
             txn_id=self.id,
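The common thread in the `synapse/appservice/__init__.py` hunks is cache correctness as much as the store swap itself: each store call made from inside a `@cached(cache_context=True)` method now passes `on_invalidate=cache_context.invalidate`, so that invalidating the inner cache entry (room members, aliases, joined rooms) also evicts the outer cached answer. A minimal sketch of that pattern, assuming the `@cached` descriptor API from `synapse.util.caches.descriptors`:

```python
from typing import List

from synapse.util.caches.descriptors import cached


class Store:
    @cached()
    async def get_local_users_in_room(self, room_id: str) -> List[str]:
        # Imagine a database hit here.
        return ["@alice:example.org", "@bot_bridge:example.org"]


class InterestChecker:
    @cached(cache_context=True)
    async def is_interested_in_room(
        self, room_id: str, store: Store, cache_context
    ) -> bool:
        # Chain the invalidation: if the member list for this room is
        # invalidated, this cached result is dropped and recomputed too.
        users = await store.get_local_users_in_room(
            room_id, on_invalidate=cache_context.invalidate
        )
        return any(u.startswith("@bot_") for u in users)
```

Without the `on_invalidate` hook, the outer result could keep answering from stale member data long after the room's membership changed.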
synapse/appservice/api.py

@@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
 from synapse.appservice import (
     ApplicationService,
-    TransactionOneTimeKeyCounts,
+    TransactionOneTimeKeysCount,
     TransactionUnusedFallbackKeys,
 )
 from synapse.events import EventBase

@@ -123,7 +123,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             response = await self.get_json(
                 uri,
                 {"access_token": service.hs_token},
-                headers={"Authorization": f"Bearer {service.hs_token}"},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object
                 return True

@@ -147,7 +147,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             response = await self.get_json(
                 uri,
                 {"access_token": service.hs_token},
-                headers={"Authorization": f"Bearer {service.hs_token}"},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object
                 return True

@@ -190,7 +190,9 @@ class ApplicationServiceApi(SimpleHttpClient):
                 b"access_token": service.hs_token,
             }
             response = await self.get_json(
-                uri, args=args, headers={"Authorization": f"Bearer {service.hs_token}"}
+                uri,
+                args=args,
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if not isinstance(response, list):
                 logger.warning(

@@ -230,7 +232,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             info = await self.get_json(
                 uri,
                 {"access_token": service.hs_token},
-                headers={"Authorization": f"Bearer {service.hs_token}"},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )

             if not _is_valid_3pe_metadata(info):

@@ -260,7 +262,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         events: List[EventBase],
         ephemeral: List[JsonDict],
         to_device_messages: List[JsonDict],
-        one_time_key_counts: TransactionOneTimeKeyCounts,
+        one_time_keys_count: TransactionOneTimeKeysCount,
         unused_fallback_keys: TransactionUnusedFallbackKeys,
         device_list_summary: DeviceListUpdates,
         txn_id: Optional[int] = None,

@@ -308,10 +310,13 @@ class ApplicationServiceApi(SimpleHttpClient):

         # TODO: Update to stable prefixes once MSC3202 completes FCP merge
         if service.msc3202_transaction_extensions:
-            if one_time_key_counts:
+            if one_time_keys_count:
                 body[
                     "org.matrix.msc3202.device_one_time_key_counts"
-                ] = one_time_key_counts
+                ] = one_time_keys_count
+                body[
+                    "org.matrix.msc3202.device_one_time_keys_count"
+                ] = one_time_keys_count
             if unused_fallback_keys:
                 body[
                     "org.matrix.msc3202.device_unused_fallback_key_types"

@@ -327,7 +332,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 uri=uri,
                 json_body=body,
                 args={"access_token": service.hs_token},
-                headers={"Authorization": f"Bearer {service.hs_token}"},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if logger.isEnabledFor(logging.DEBUG):
                 logger.debug(
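Every `Authorization` hunk in `synapse/appservice/api.py` above is the same one-line fix: the `headers=` mapping handed to `SimpleHttpClient` ultimately feeds Twisted's `Headers`, which maps each header name to a *list* of values. A plain string is itself a sequence of one-character strings, so it does not mean "one header value". Directly with Twisted, the two shapes compare like this (the token value is invented):

```python
from twisted.web.http_headers import Headers

hs_token = "hs_token_example"

# Correct: one header name -> a list containing one value.
good = Headers({"Authorization": [f"Bearer {hs_token}"]})
print(good.getRawHeaders("Authorization"))
# ['Bearer hs_token_example']

# Incorrect (the shape the removed lines used): a bare string is a sequence
# of one-character strings, so every character becomes its own header value.
bad = Headers({"Authorization": f"Bearer {hs_token}"})
print(len(bad.getRawHeaders("Authorization")))  # one entry per character
```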
synapse/appservice/scheduler.py

@@ -64,7 +64,7 @@ from typing import (
 from synapse.appservice import (
     ApplicationService,
     ApplicationServiceState,
-    TransactionOneTimeKeyCounts,
+    TransactionOneTimeKeysCount,
     TransactionUnusedFallbackKeys,
 )
 from synapse.appservice.api import ApplicationServiceApi

@@ -258,7 +258,7 @@ class _ServiceQueuer:
         ):
             return

-        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
+        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
         unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None

         if (

@@ -269,7 +269,7 @@ class _ServiceQueuer:
             # for the users which are mentioned in this transaction,
             # as well as the appservice's sender.
             (
-                one_time_key_counts,
+                one_time_keys_count,
                 unused_fallback_keys,
             ) = await self._compute_msc3202_otk_counts_and_fallback_keys(
                 service, events, ephemeral, to_device_messages_to_send

@@ -281,7 +281,7 @@ class _ServiceQueuer:
                 events,
                 ephemeral,
                 to_device_messages_to_send,
-                one_time_key_counts,
+                one_time_keys_count,
                 unused_fallback_keys,
                 device_list_summary,
             )

@@ -296,7 +296,7 @@ class _ServiceQueuer:
         events: Iterable[EventBase],
         ephemerals: Iterable[JsonDict],
         to_device_messages: Iterable[JsonDict],
-    ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
+    ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
         """
         Given a list of the events, ephemeral messages and to-device messages,
         - first computes a list of application services users that may have

@@ -367,7 +367,7 @@ class _TransactionController:
         events: List[EventBase],
         ephemeral: Optional[List[JsonDict]] = None,
         to_device_messages: Optional[List[JsonDict]] = None,
-        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
+        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
         unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
         device_list_summary: Optional[DeviceListUpdates] = None,
     ) -> None:

@@ -380,7 +380,7 @@ class _TransactionController:
             events: The persistent events to include in the transaction.
             ephemeral: The ephemeral events to include in the transaction.
             to_device_messages: The to-device messages to include in the transaction.
-            one_time_key_counts: Counts of remaining one-time keys for relevant
+            one_time_keys_count: Counts of remaining one-time keys for relevant
                 appservice devices in the transaction.
             unused_fallback_keys: Lists of unused fallback keys for relevant
                 appservice devices in the transaction.

@@ -397,7 +397,7 @@ class _TransactionController:
             events=events,
             ephemeral=ephemeral or [],
             to_device_messages=to_device_messages or [],
-            one_time_key_counts=one_time_key_counts or {},
+            one_time_keys_count=one_time_keys_count or {},
             unused_fallback_keys=unused_fallback_keys or {},
             device_list_summary=device_list_summary or DeviceListUpdates(),
         )
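The scheduler hunks above are the internal half of an MSC3202 field rename: the unstable field the MSC defines is `device_one_time_keys_count`, while Synapse had been emitting `device_one_time_key_counts`. As the `api.py` hunk shows, transactions now carry both unstable-prefixed fields while appservices migrate. Illustratively (structure per the type comment `user ID -> {device ID -> {algorithm -> count}}`; the IDs and count are made up), the relevant fragment of a transaction body looks like:

```python
transaction_fragment = {
    # Deprecated spelling, still sent for backwards compatibility:
    "org.matrix.msc3202.device_one_time_key_counts": {
        "@alice:example.org": {"DEVICE1": {"signed_curve25519": 50}}
    },
    # Spelling that matches MSC3202:
    "org.matrix.msc3202.device_one_time_keys_count": {
        "@alice:example.org": {"DEVICE1": {"signed_curve25519": 50}}
    },
}
```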
synapse/config/_util.py

@@ -33,6 +33,9 @@ def validate_config(
         config: the configuration value to be validated
         config_path: the path within the config file. This will be used as a basis
             for the error message.
+
+    Raises:
+        ConfigError, if validation fails.
     """
     try:
         jsonschema.validate(config, json_schema)
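For context on the `validate_config` docstring fix above: the function is a thin wrapper that runs `jsonschema.validate` and converts any validation failure into Synapse's `ConfigError`, using `config_path` to point at the offending section. A hedged usage sketch (the schema and values are invented; the argument order matches the `validate_config(_MAIN_SCHEMA, config, ())` call visible in the `config/api.py` hunk below):

```python
from synapse.config._base import ConfigError
from synapse.config._util import validate_config

schema = {
    "type": "object",
    "properties": {"port": {"type": "integer"}},
}

try:
    # ("my_section",) becomes the basis for the error message's config path.
    validate_config(schema, {"port": "not-a-number"}, ("my_section",))
except ConfigError as e:
    print(e)  # the message locates the failure under my_section
```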
synapse/config/api.py

@@ -13,12 +13,13 @@
 # limitations under the License.

 import logging
-from typing import Any, Iterable
+from typing import Any, Iterable, Optional, Tuple

 from synapse.api.constants import EventTypes
 from synapse.config._base import Config, ConfigError
 from synapse.config._util import validate_config
 from synapse.types import JsonDict
+from synapse.types.state import StateFilter

 logger = logging.getLogger(__name__)

@@ -26,16 +27,20 @@ logger = logging.getLogger(__name__)
 class ApiConfig(Config):
     section = "api"

+    room_prejoin_state: StateFilter
+    track_puppetted_users_ips: bool
+
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         validate_config(_MAIN_SCHEMA, config, ())
-        self.room_prejoin_state = list(self._get_prejoin_state_types(config))
+        self.room_prejoin_state = StateFilter.from_types(
+            self._get_prejoin_state_entries(config)
+        )
         self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)

-    def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
-        """Get the event types to include in the prejoin state
-
-        Parses the config and returns an iterable of the event types to be included.
-        """
+    def _get_prejoin_state_entries(
+        self, config: JsonDict
+    ) -> Iterable[Tuple[str, Optional[str]]]:
+        """Get the event types and state keys to include in the prejoin state."""
         room_prejoin_state_config = config.get("room_prejoin_state") or {}

         # backwards-compatibility support for room_invite_state_types

@@ -50,33 +55,39 @@ class ApiConfig(Config):

             logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)

-            yield from config["room_invite_state_types"]
+            for event_type in config["room_invite_state_types"]:
+                yield event_type, None
             return

         if not room_prejoin_state_config.get("disable_default_event_types"):
-            yield from _DEFAULT_PREJOIN_STATE_TYPES
+            yield from _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS

-        yield from room_prejoin_state_config.get("additional_event_types", [])
+        for entry in room_prejoin_state_config.get("additional_event_types", []):
+            if isinstance(entry, str):
+                yield entry, None
+            else:
+                yield entry


 _ROOM_INVITE_STATE_TYPES_WARNING = """\
 WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
 and replaced with 'room_prejoin_state'. New features may not work correctly
-unless 'room_invite_state_types' is removed. See the sample configuration file for
-details of 'room_prejoin_state'.
+unless 'room_invite_state_types' is removed. See the config documentation at
+https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_prejoin_state
+for details of 'room_prejoin_state'.
 --------------------------------------------------------------------------------
 """

-_DEFAULT_PREJOIN_STATE_TYPES = [
-    EventTypes.JoinRules,
-    EventTypes.CanonicalAlias,
-    EventTypes.RoomAvatar,
-    EventTypes.RoomEncryption,
-    EventTypes.Name,
+_DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS = [
+    (EventTypes.JoinRules, ""),
+    (EventTypes.CanonicalAlias, ""),
+    (EventTypes.RoomAvatar, ""),
+    (EventTypes.RoomEncryption, ""),
+    (EventTypes.Name, ""),
     # Per MSC1772.
-    EventTypes.Create,
+    (EventTypes.Create, ""),
     # Per MSC3173.
-    EventTypes.Topic,
+    (EventTypes.Topic, ""),
 ]

@@ -90,7 +101,17 @@ _ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
         "disable_default_event_types": {"type": "boolean"},
         "additional_event_types": {
             "type": "array",
-            "items": {"type": "string"},
+            "items": {
+                "oneOf": [
+                    {"type": "string"},
+                    {
+                        "type": "array",
+                        "items": {"type": "string"},
+                        "minItems": 2,
+                        "maxItems": 2,
+                    },
+                ],
+            },
         },
     },
 },
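The net effect of the `config/api.py` hunks: `room_prejoin_state` entries are now (event type, state key) pairs feeding a `StateFilter`, and the schema change lets `additional_event_types` mix bare type strings with two-element `[type, state_key]` pairs. A sketch of what the new `_get_prejoin_state_entries` yields for a hypothetical config, and how it lands in a `StateFilter` (config values invented):

```python
from synapse.types.state import StateFilter

# For a homeserver.yaml fragment such as:
#   room_prejoin_state:
#     additional_event_types:
#       - "org.example.custom"             # bare type: matches any state_key
#       - ["m.room.member", "@user:test"]  # explicit [type, state_key] pair
# the generator would yield entries like these, alongside the defaults:
entries = [
    ("m.room.join_rules", ""),     # from _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS
    ("org.example.custom", None),  # bare string -> (type, None), i.e. all state_keys
    ("m.room.member", "@user:test"),
]

prejoin_filter = StateFilter.from_types(entries)
```

Moving from a plain list of types to a `StateFilter` is what makes per-state-key granularity expressible in this config at all.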
synapse/config/experimental.py

@@ -16,6 +16,7 @@ from typing import Any, Optional

 import attr

+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.config._base import Config
 from synapse.types import JsonDict

@@ -53,9 +54,6 @@ class ExperimentalConfig(Config):
         # MSC3266 (room summary api)
         self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)

-        # MSC3030 (Jump to date API endpoint)
-        self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)
-
         # MSC2409 (this setting only relates to optionally sending to-device messages).
         # Presence, typing and read receipt EDUs are already sent to application services that
         # have opted in to receive them. If enabled, this adds to-device messages to that list.

@@ -98,6 +96,9 @@ class ExperimentalConfig(Config):
         # MSC3773: Thread notifications
         self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)

+        # MSC3664: Pushrules to match on related events
+        self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
+
         # MSC3848: Introduce errcodes for specific event sending failures
         self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)

@@ -125,3 +126,13 @@ class ExperimentalConfig(Config):
         self.msc3886_endpoint: Optional[str] = experimental.get(
             "msc3886_endpoint", None
         )
+
+        # MSC3912: Relation-based redactions.
+        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
+
+        # MSC1767 and friends: Extensible Events
+        self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
+        if self.msc1767_enabled:
+            # Enable room version (and thus applicable push rules from MSC3931/3932)
+            version_id = RoomVersions.MSC1767v10.identifier
+            KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10
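The MSC1767 hunk above shows how Synapse gates an experimental room version behind a config flag: read the flag from the `experimental` section, then register the version in `KNOWN_ROOM_VERSIONS` so that room creation and validation can see it. A toy, self-contained rendering of the same pattern (the registry and version objects here are stand-ins, not Synapse's):

```python
from types import SimpleNamespace

# Stand-ins for synapse.api.room_versions.KNOWN_ROOM_VERSIONS / RoomVersions.
KNOWN_ROOM_VERSIONS = {"10": SimpleNamespace(identifier="10")}
MSC1767v10 = SimpleNamespace(identifier="org.matrix.msc1767.10")

experimental = {"msc1767_enabled": True}  # as read from homeserver.yaml

if experimental.get("msc1767_enabled", False):
    # Registering the version is the whole switch: anything that consults the
    # registry (and, per the diff's comment, the MSC3931/3932 push rules that
    # key off the room version) now sees it.
    KNOWN_ROOM_VERSIONS[MSC1767v10.identifier] = MSC1767v10

print(sorted(KNOWN_ROOM_VERSIONS))  # ['10', 'org.matrix.msc1767.10']
```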
synapse/config/logger.py

@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
 # Synapse also supports structured logging for machine readable logs which can
 # be ingested by ELK stacks. See [2] for details.
 #
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
 # [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

 version: 1

@@ -317,10 +317,9 @@ def setup_logging(
     Set up the logging subsystem.

     Args:
-        config (LoggingConfig | synapse.config.worker.WorkerConfig):
-            configuration data
+        config: configuration data

-        use_worker_options (bool): True to use the 'worker_log_config' option
+        use_worker_options: True to use the 'worker_log_config' option
             instead of 'log_config'.

         logBeginner: The Twisted logBeginner to use.
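Last, the `setup_logging` hunk is a docstring cleanup worth imitating: once parameters are annotated in the signature, repeating the types inside the Google-style `Args:` block only invites drift, so the types are dropped from the prose. On a toy function, the convention the diff moves to:

```python
from typing import Optional


def setup_logging_sketch(
    config: object,
    use_worker_options: bool = False,
    log_beginner: Optional[object] = None,
) -> None:
    """Set up the logging subsystem (illustrative signature only).

    Args:
        config: configuration data
        use_worker_options: True to use the 'worker_log_config' option
            instead of 'log_config'.
        log_beginner: The Twisted logBeginner to use.
    """
```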
Some files were not shown because too many files have changed in this diff.