Compare commits

..

7 Commits

Author SHA1 Message Date
Andrew Morgan
f4f5a706f8 start.py: Support SYNAPSE_CONFIG_TEMPLATE_DIR 2024-09-30 12:43:32 +01:00
Andrew Morgan
28245e3908 Document additional environment variables 2024-09-30 12:43:32 +01:00
Andrew Morgan
b9c50043e0 fixup: Add SYNAPSE_CONFIG_TEMPLATE_DIR env var 2024-09-30 12:43:32 +01:00
Andrew Morgan
e48479978b Allow specifying where generated config files are stored
* Store generated worker config files in SYNAPSE_CONFIG_DIR, rather than hardcoding
  `/conf`.
* Move the `workers_have_been_configured` filepath to the config dir.
2024-09-30 12:38:49 +01:00
Andrew Morgan
5caca2acd6 Allow specifying where generated config files are stored
* Store generated worker config files in SYNAPSE_CONFIG_DIR, rather than hardcoding
  `/conf`.
* Move the `workers_have_been_configured` filepath to the config dir.
2024-09-30 12:27:53 +01:00
Andrew Morgan
d3ed0ebebd Remove unused config_path dict entry
This is not used anywhere, as far as I can tell.
2024-09-30 12:25:41 +01:00
Andrew Morgan
af2a16370d Add SYNAPSE_CONFIG_TEMPLATE_DIR env var to worker config script
This allows specifying a directory other than `/conf` where config
templates should live. This allows this script to be used outside of the
context of a docker container where /conf is bound to a dir on the host.
2024-09-30 12:17:43 +01:00
295 changed files with 6458 additions and 18173 deletions
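
These commits make the previously hardcoded `/conf` and `/data` locations configurable. A minimal sketch of how the scripts resolve their directories after this change, with the defaults taken from the diffs below (the snippet is illustrative only, not part of the change set):

```python
import os

# Defaults as documented/used in docker/start.py and
# docker/configure_workers_and_start.py in the diffs below.
config_dir = os.environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = os.environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = os.environ.get("SYNAPSE_DATA_DIR", "/data")
# New in this branch: where the jinja2 config templates live
# (previously hardcoded to /conf).
template_dir = os.environ.get("SYNAPSE_CONFIG_TEMPLATE_DIR", "/conf")
```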

View File

@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.9 right now)
# is Python 3.8 right now)
trial_sqlite_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "all",
}
@@ -53,14 +53,14 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.10", "3.11", "3.12", "3.13")
for version in ("3.9", "3.10", "3.11", "3.12")
)
trial_postgres_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "postgres",
"postgres-version": "13",
"postgres-version": "11",
"extras": "all",
}
]
@@ -68,16 +68,16 @@ trial_postgres_tests = [
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.13",
"python-version": "3.12",
"database": "postgres",
"postgres-version": "17",
"postgres-version": "16",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.9",
"python-version": "3.8",
"database": "sqlite",
"extras": "",
}
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
#
# For each type of test we only run on bullseye on PRs
# For each type of test we only run on focal on PRs
sytest_tests = [
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"reactor": "asyncio",
},
{
"sytest-tag": "bullseye",
"sytest-tag": "focal",
"postgres": "postgres",
"reactor": "asyncio",
},
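
For context, the script above collects these dicts into a JSON matrix and exposes it to later workflow steps via `set_output` (visible in the hunk header above). A minimal sketch, assuming the usual `GITHUB_OUTPUT` convention for step outputs; the real helper lives in the script itself:

```python
import json
import os

def set_output(name: str, value: str) -> None:
    # Append "name=value" to the file GitHub Actions uses for step outputs.
    # (Assumed implementation of the helper referenced above.)
    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
        fh.write(f"{name}={value}\n")

trial_sqlite_tests = [
    {"python-version": "3.8", "database": "sqlite", "extras": "all"},
]
set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```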

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `jammy` container; it
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally

View File

@@ -14,7 +14,7 @@ permissions:
id-token: write # needed for signing the images with GitHub OIDC Token
jobs:
build:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- name: Set up QEMU
id: qemu
@@ -30,7 +30,7 @@ jobs:
run: docker buildx inspect
- name: Install Cosign
uses: sigstore/cosign-installer@v3.7.0
uses: sigstore/cosign-installer@v3.6.0
- name: Checkout repository
uses: actions/checkout@v4

View File

@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}

View File

@@ -132,9 +132,9 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: bullseye
- sytest-tag: focal
- sytest-tag: bullseye
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis

View File

@@ -91,19 +91,10 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Artifact name
id: artifact-name
# We can't have colons in the upload name of the artifact, so we convert
# e.g. `debian:sid` to `sid`.
env:
DISTRO: ${{ matrix.distro }}
run: |
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
name: debs
path: debs/*
build-wheels:
@@ -111,7 +102,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, macos-13]
os: [ubuntu-20.04, macos-12]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@@ -121,9 +112,9 @@ jobs:
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-13"
os: "macos-12"
# Don't build aarch64 wheels on mac.
- os: "macos-13"
- os: "macos-12"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
@@ -153,7 +144,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -165,9 +156,9 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
@@ -186,7 +177,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
with:
name: Sdist
path: dist/*.tar.gz
@@ -203,23 +194,17 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
run: |
mkdir debs
mv debs*/* debs/
tar -cvJf debs.tar.xz debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
# Pinned to work around https://github.com/softprops/action-gh-release/issues/445
uses: softprops/action-gh-release@v0.1.15
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel*/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true

View File

@@ -397,7 +397,7 @@ jobs:
needs:
- linting-done
- changes
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
@@ -409,12 +409,12 @@ jobs:
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python3-dev \
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v5
with:
python-version: '3.9'
python-version: '3.8'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.9"]
python-version: ["pypy-3.8"]
extras: ["all"]
steps:
@@ -580,11 +580,11 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.9"
postgres-version: "13"
- python-version: "3.8"
postgres-version: "11"
- python-version: "3.13"
postgres-version: "17"
- python-version: "3.11"
postgres-version: "15"
services:
postgres:

View File

@@ -99,11 +99,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:bullseye
image: matrixdotorg/sytest-synapse:focal
volumes:
- ${{ github.workspace }}:/src

CHANGES.md (3563 changed lines)

File diff suppressed because it is too large.

Cargo.lock (generated, 219 changed lines)
View File

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.95"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
[[package]]
name = "arc-swap"
@@ -35,6 +35,12 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "blake2"
version = "0.10.6"
@@ -61,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytes"
version = "1.9.0"
version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
[[package]]
name = "cfg-if"
@@ -124,8 +130,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"wasi",
"wasm-bindgen",
]
[[package]]
@@ -154,9 +162,9 @@ dependencies = [
[[package]]
name = "heck"
version = "0.5.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hex"
@@ -166,9 +174,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "http"
version = "1.2.0"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
dependencies = [
"bytes",
"fnv",
@@ -215,10 +223,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
[[package]]
name = "log"
version = "0.4.25"
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "memchr"
@@ -247,6 +265,29 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "parking_lot"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets",
]
[[package]]
name = "portable-atomic"
version = "1.6.0"
@@ -261,25 +302,25 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.89"
version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.23.4"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc"
checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"once_cell",
"parking_lot",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -289,9 +330,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.23.4"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7"
checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50"
dependencies = [
"once_cell",
"target-lexicon",
@@ -299,9 +340,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.23.4"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d"
checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403"
dependencies = [
"libc",
"pyo3-build-config",
@@ -309,9 +350,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.12.0"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3eb421dc86d38d08e04b927b02424db480be71b777fa3a56f32e2f2a3a1a3b08"
checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c"
dependencies = [
"arc-swap",
"log",
@@ -320,9 +361,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.23.4"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7"
checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -332,9 +373,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
version = "0.23.4"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4"
checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c"
dependencies = [
"heck",
"proc-macro2",
@@ -345,9 +386,9 @@ dependencies = [
[[package]]
name = "pythonize"
version = "0.23.0"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff"
checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c"
dependencies = [
"pyo3",
"serde",
@@ -393,10 +434,19 @@ dependencies = [
]
[[package]]
name = "regex"
version = "1.11.1"
name = "redox_syscall"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
dependencies = [
"aho-corasick",
"memchr",
@@ -406,9 +456,9 @@ dependencies = [
[[package]]
name = "regex-automata"
version = "0.4.8"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
@@ -417,9 +467,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "ryu"
@@ -428,19 +478,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "serde"
version = "1.0.217"
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.217"
version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
dependencies = [
"proc-macro2",
"quote",
@@ -449,9 +505,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.137"
version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
dependencies = [
"itoa",
"memchr",
@@ -481,6 +537,12 @@ dependencies = [
"digest",
]
[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "subtle"
version = "2.5.0"
@@ -489,9 +551,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
version = "2.0.85"
version = "2.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56"
checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9"
dependencies = [
"proc-macro2",
"quote",
@@ -536,10 +598,11 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "ulid"
version = "1.1.4"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f294bff79170ed1c5633812aff1e565c35d993a36e757f9bc0accf5eec4e6045"
checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
dependencies = [
"getrandom",
"rand",
"web-time",
]
@@ -631,3 +694,67 @@ dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "windows-targets"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"

View File

@@ -1,10 +1,8 @@
# A build script for poetry that adds the rust extension.
import itertools
import os
from typing import Any, Dict
from packaging.specifiers import SpecifierSet
from setuptools_rust import Binding, RustExtension
@@ -16,8 +14,6 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
# This flag is a no-op in the latest versions. Instead, we need to
# specify this in the `bdist_wheel` config below.
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
@@ -25,18 +21,3 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False
# We lookup the minimum supported python version by looking at
# `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first python
# version that matches. We then convert that into the `py_limited_api` form,
# e.g. cp39 for python 3.9.
py_limited_api: str
python_bounds = SpecifierSet(setup_kwargs["python_requires"])
for minor_version in itertools.count(start=8):
if f"3.{minor_version}.0" in python_bounds:
py_limited_api = f"cp3{minor_version}"
break
setup_kwargs.setdefault("options", {}).setdefault("bdist_wheel", {})[
"py_limited_api"
] = py_limited_api
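
One side of the diff above computes the `py_limited_api` tag from `python_requires`. A standalone sketch of that lookup (the version bounds here are example values):

```python
import itertools

from packaging.specifiers import SpecifierSet

python_bounds = SpecifierSet(">=3.8.0,<4.0.0")  # example bounds
for minor_version in itertools.count(start=8):
    # Find the first Python 3.x release satisfying the bounds and map it to
    # the corresponding limited-API tag, e.g. 3.8 -> cp38.
    if f"3.{minor_version}.0" in python_bounds:
        py_limited_api = f"cp3{minor_version}"
        break

print(py_limited_api)  # -> cp38 for the bounds above
```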

changelog.d/17749.doc (new file, 1 changed line)
View File

@@ -0,0 +1 @@
Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs.

View File

@@ -245,7 +245,7 @@ class SynapseCmd(cmd.Cmd):
if "flows" not in json_res:
print("Failed to find any login flows.")
return False
defer.returnValue(False)
flow = json_res["flows"][0] # assume first is the one we want.
if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
@@ -254,8 +254,8 @@ class SynapseCmd(cmd.Cmd):
"Unable to login via the command line client. Please visit "
"%s to login." % fallback_url
)
return False
return True
defer.returnValue(False)
defer.returnValue(True)
def do_emailrequest(self, line):
"""Requests the association of a third party identifier

View File

@@ -78,7 +78,7 @@ class TwistedHttpClient(HttpClient):
url, data, headers_dict={"Content-Type": ["application/json"]}
)
body = yield readBody(response)
return response.code, body
defer.returnValue((response.code, body))
@defer.inlineCallbacks
def get_json(self, url, args=None):
@@ -88,7 +88,7 @@ class TwistedHttpClient(HttpClient):
url = "%s?%s" % (url, qs)
response = yield self._create_get_request(url)
body = yield readBody(response)
return json.loads(body)
defer.returnValue(json.loads(body))
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a PUT request"""
@@ -134,7 +134,7 @@ class TwistedHttpClient(HttpClient):
response = yield self._create_request(method, url)
body = yield readBody(response)
return json.loads(body)
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def _create_request(
@@ -173,7 +173,7 @@ class TwistedHttpClient(HttpClient):
if self.verbose:
print("Status %s %s" % (response.code, response.phrase))
print(pformat(list(response.headers.getAllRawHeaders())))
return response
defer.returnValue(response)
def sleep(self, seconds):
d = defer.Deferred()

View File

@@ -30,6 +30,3 @@ docker-compose up -d
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see
https://github.com/element-hq/element-docker-demo**

View File

@@ -51,7 +51,7 @@ services:
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:15-alpine
image: docker.io/postgres:12-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse

View File

@@ -8,9 +8,6 @@ All examples and snippets assume that your Synapse service is called `synapse` i
An example Docker Compose file can be found [here](docker-compose.yaml).
**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this
docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo**
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`).

debian/changelog (vendored, 138 changed lines)
View File

@@ -1,141 +1,3 @@
matrix-synapse-py3 (1.124.0) stable; urgency=medium
* New Synapse release 1.124.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Feb 2025 11:55:22 +0100
matrix-synapse-py3 (1.124.0~rc3) stable; urgency=medium
* New Synapse release 1.124.0rc3.
-- Synapse Packaging team <packages@matrix.org> Fri, 07 Feb 2025 13:42:55 +0000
matrix-synapse-py3 (1.124.0~rc2) stable; urgency=medium
* New Synapse release 1.124.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 05 Feb 2025 16:35:53 +0000
matrix-synapse-py3 (1.124.0~rc1) stable; urgency=medium
* New Synapse release 1.124.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Feb 2025 11:53:05 +0000
matrix-synapse-py3 (1.123.0) stable; urgency=medium
* New Synapse release 1.123.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Jan 2025 08:37:34 -0700
matrix-synapse-py3 (1.123.0~rc1) stable; urgency=medium
* New Synapse release 1.123.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Jan 2025 14:39:57 +0100
matrix-synapse-py3 (1.122.0) stable; urgency=medium
* New Synapse release 1.122.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Jan 2025 14:14:14 +0000
matrix-synapse-py3 (1.122.0~rc1) stable; urgency=medium
* New Synapse release 1.122.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Jan 2025 14:06:19 +0000
matrix-synapse-py3 (1.121.1) stable; urgency=medium
* New Synapse release 1.121.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 11 Dec 2024 18:24:48 +0000
matrix-synapse-py3 (1.121.0) stable; urgency=medium
* New Synapse release 1.121.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 11 Dec 2024 13:12:30 +0100
matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium
* New Synapse release 1.121.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 04 Dec 2024 14:47:23 +0000
matrix-synapse-py3 (1.120.2) stable; urgency=medium
* New synapse release 1.120.2.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 15:43:37 +0000
matrix-synapse-py3 (1.120.1) stable; urgency=medium
* New synapse release 1.120.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 09:07:57 +0000
matrix-synapse-py3 (1.120.0) stable; urgency=medium
* New synapse release 1.120.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2024 13:10:23 +0000
matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium
* New Synapse release 1.120.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Nov 2024 15:02:21 +0000
matrix-synapse-py3 (1.119.0) stable; urgency=medium
* New Synapse release 1.119.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000
matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
* New Synapse release 1.119.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000
matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
* New Synapse release 1.119.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700
matrix-synapse-py3 (1.118.0) stable; urgency=medium
* New Synapse release 1.118.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Oct 2024 15:29:53 +0100
matrix-synapse-py3 (1.118.0~rc1) stable; urgency=medium
* New Synapse release 1.118.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Oct 2024 11:48:14 +0100
matrix-synapse-py3 (1.117.0) stable; urgency=medium
* New Synapse release 1.117.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Oct 2024 10:46:30 +0100
matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium
* New Synapse release 1.117.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Oct 2024 14:37:11 +0100
matrix-synapse-py3 (1.116.0) stable; urgency=medium
* New Synapse release 1.116.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Oct 2024 11:14:07 +0100
matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium
* New synapse release 1.116.0rc2.

View File

@@ -20,7 +20,7 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
ARG PYTHON_VERSION=3.12
ARG PYTHON_VERSION=3.11
###
### Stage 0: generate requirements.txt

View File

@@ -7,7 +7,6 @@
#}
## Server ##
public_baseurl: http://127.0.0.1:8008/
report_stats: False
trusted_key_servers: []
enable_registration: true
@@ -85,14 +84,6 @@ rc_invites:
per_user:
per_second: 1000
burst_count: 1000
per_issuer:
per_second: 1000
burst_count: 1000
rc_presence:
per_user:
per_second: 9999
burst_count: 9999
federation_rr_transactions_per_room_per_second: 9999
@@ -113,16 +104,6 @@ experimental_features:
msc3967_enabled: true
# Expose a room summary for public rooms
msc3266_enabled: true
# Send to-device messages to application services
msc2409_to_device_messages_enabled: true
# Allow application services to masquerade devices
msc3202_device_masquerading: true
# Sending device list changes, one-time key counts and fallback key usage to application services
msc3202_transaction_extensions: true
# Proxy OTK claim requests to exclusive ASes
msc3983_appservice_otk_claims: true
# Proxy key queries to exclusive ASes
msc3984_appservice_key_query: true
server_notices:
system_mxid_localpart: _server

View File

@@ -38,13 +38,10 @@ server {
{% if using_unix_sockets %}
proxy_pass http://unix:/run/main_public.sock;
{% else %}
# note: do not add a path (even a single /) after the port in `proxy_pass`,
# otherwise nginx will canonicalise the URI and cause signature verification
# errors.
proxy_pass http://localhost:8080;
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host:$server_port;
proxy_set_header Host $host;
}
}

View File

@@ -24,6 +24,15 @@
# nginx and supervisord configs depending on the workers requested.
#
# The environment variables it reads are:
# * SYNAPSE_CONFIG_PATH: The path where the generated `homeserver.yaml` will
# be stored.
# * SYNAPSE_CONFIG_DIR: The directory where generated config will be stored.
# If `SYNAPSE_CONFIG_PATH` is not set, it will default to
# SYNAPSE_CONFIG_DIR/homeserver.yaml.
# * SYNAPSE_DATA_DIR: Where the generated config will put persistent data
# such as the database and media store.
# * SYNAPSE_CONFIG_TEMPLATE_DIR: The directory containing jinja2 templates for
# configuration that this script will generate config from. Defaults to '/conf'.
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
@@ -35,6 +44,8 @@
# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK: Whether worker logs should be written to disk,
# in addition to stdout.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -48,7 +59,9 @@
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
# for testing.
# for testing.
# * SYNAPSE_USE_UNIX_SOCKET: if set, workers will communicate via unix socket
# rather than TCP.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -604,7 +617,9 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
# This script makes use of the `SYNAPSE_CONFIG_DIR` environment variable to
# determine where to place the generated homeserver config.
def parse_worker_types(
@@ -733,8 +748,10 @@ def parse_worker_types(
def generate_worker_files(
environ: Mapping[str, str],
config_dir: str,
config_path: str,
data_dir: str,
template_dir: str,
requested_worker_types: Dict[str, Set[str]],
) -> None:
"""Read the desired workers(if any) that is passed in and generate shared
@@ -742,9 +759,13 @@ def generate_worker_files(
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
config_dir: The location of the configuration directory, where generated
worker config files are written to.
config_path: The location of the base Synapse homeserver config file.
data_dir: The location of the synapse data directory. Where logs will be
stored (if `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK` is set).
template_dir: The location of the template directory. Where jinja2
templates for config files live.
requested_worker_types: A Dict containing requested workers in the format of
{'worker_name1': {'worker_type', ...}}
"""
@@ -807,7 +828,8 @@ def generate_worker_files(
nginx_locations: Dict[str, str] = {}
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
workers_config_dir = os.path.join(config_dir, "workers")
os.makedirs(workers_config_dir, exist_ok=True)
# Start worker ports from this arbitrary port
worker_port = 18009
@@ -854,7 +876,7 @@ def generate_worker_files(
worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
{"name": worker_name, "port": str(worker_port)}
)
# Update the shared config with any worker_type specific options. The first of a
@@ -877,12 +899,14 @@ def generate_worker_files(
worker_descriptors.append(worker_config)
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
log_config_filepath = generate_worker_log_config(
environ, worker_name, template_dir, workers_config_dir, data_dir
)
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
f"/conf/workers/{worker_name}.yaml",
os.path.join(template_dir, "worker.yaml.j2"),
os.path.join(workers_config_dir, f"{worker_name}.yaml"),
**worker_config,
worker_log_config_filepath=log_config_filepath,
using_unix_sockets=using_unix_sockets,
@@ -923,7 +947,9 @@ def generate_worker_files(
# Finally, we'll write out the config files.
# log config for the master process
master_log_config = generate_worker_log_config(environ, "master", data_dir)
master_log_config = generate_worker_log_config(
environ, "master", template_dir, workers_config_dir, data_dir
)
shared_config["log_config"] = master_log_config
# Find application service registrations
@@ -954,8 +980,8 @@ def generate_worker_files(
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
os.path.join(template_dir, "shared.yaml.j2"),
os.path.join(workers_config_dir, "shared.yaml"),
shared_worker_config=yaml.dump(shared_config),
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
@@ -965,7 +991,7 @@ def generate_worker_files(
# Nginx config
convert(
"/conf/nginx.conf.j2",
os.path.join(template_dir, "nginx.conf.j2"),
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
@@ -977,7 +1003,7 @@ def generate_worker_files(
# Supervisord config
os.makedirs("/etc/supervisor", exist_ok=True)
convert(
"/conf/supervisord.conf.j2",
os.path.join(template_dir, "supervisord.conf.j2"),
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
@@ -985,7 +1011,7 @@ def generate_worker_files(
)
convert(
"/conf/synapse.supervisord.conf.j2",
os.path.join(template_dir, "synapse.supervisord.conf.j2"),
"/etc/supervisor/conf.d/synapse.conf",
workers=worker_descriptors,
main_config_path=config_path,
@@ -994,7 +1020,7 @@ def generate_worker_files(
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
os.path.join(template_dir, "healthcheck.sh.j2"),
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
@@ -1006,10 +1032,24 @@ def generate_worker_files(
def generate_worker_log_config(
environ: Mapping[str, str], worker_name: str, data_dir: str
environ: Mapping[str, str],
worker_name: str,
workers_config_dir: str,
template_dir: str,
data_dir: str,
) -> str:
"""Generate a log.config file for the given worker.
Args:
environ: A mapping representing the environment variables that this script
is running with.
worker_name: The name of the worker. Used in generated file paths.
workers_config_dir: The location of the worker configuration directory,
where the generated worker log config will be saved.
template_dir: The directory containing jinja2 template files.
data_dir: The directory where log files will be written (if
`SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK` is set).
Returns: the path to the generated file
"""
# Check whether we should write worker logs to disk, in addition to the console
@@ -1024,9 +1064,9 @@ def generate_worker_log_config(
extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
# Render and write the file
log_config_filepath = f"/conf/workers/{worker_name}.log.config"
log_config_filepath = os.path.join(workers_config_dir, f"{worker_name}.log.config")
convert(
"/conf/log.config",
os.path.join(template_dir, "log.config"),
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
@@ -1049,6 +1089,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
template_dir = environ.get("SYNAPSE_CONFIG_TEMPLATE_DIR", "/conf")
# override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
# this needs to be handled by a frontend proxy
@@ -1060,9 +1101,10 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
generate_base_homeserver_config()
else:
log("Base homeserver config exists—not regenerating")
# This script may be run multiple times (mostly by Complement, see note at top of
# file). Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
mark_filepath = os.path.join(config_dir, "workers_have_been_configured")
if not os.path.exists(mark_filepath):
# Collect and validate worker_type requests
# Read the desired worker configuration from the environment
@@ -1079,7 +1121,9 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
# Always regenerate all other config files
log("Generating worker config files")
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
generate_worker_files(
environ, config_dir, config_path, data_dir, template_dir, requested_worker_types
)
# Mark workers as being configured
with open(mark_filepath, "w") as f:
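
With these changes the per-worker files are rendered from `template_dir` into a `workers` subdirectory of the config dir, rather than into hardcoded `/conf` paths. A small illustration of the resulting path wiring (the worker name is an example value):

```python
import os

template_dir = os.environ.get("SYNAPSE_CONFIG_TEMPLATE_DIR", "/conf")
config_dir = os.environ.get("SYNAPSE_CONFIG_DIR", "/data")

workers_config_dir = os.path.join(config_dir, "workers")
mark_filepath = os.path.join(config_dir, "workers_have_been_configured")

worker_name = "generic_worker1"  # example value
worker_template = os.path.join(template_dir, "worker.yaml.j2")
worker_config_file = os.path.join(workers_config_dir, f"{worker_name}.yaml")
worker_log_config = os.path.join(workers_config_dir, f"{worker_name}.log.config")
# convert(worker_template, worker_config_file, ...) then renders the jinja2
# template into the per-worker config file, as shown in the diff above.
```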

View File

@@ -42,6 +42,8 @@ def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
def generate_config_from_template(
data_dir: str,
template_dir: str,
config_dir: str,
config_path: str,
os_environ: Mapping[str, str],
@@ -50,6 +52,9 @@ def generate_config_from_template(
"""Generate a homeserver.yaml from environment variables
Args:
data_dir: where persistent data is stored
template_dir: The location of the template directory. Where jinja2
templates for config files live.
config_dir: where to put generated config files
config_path: where to put the main config file
os_environ: environment mapping
@@ -70,9 +75,10 @@ def generate_config_from_template(
"macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
}
synapse_server_name = environ["SYNAPSE_SERVER_NAME"]
for name, secret in secrets.items():
if secret not in environ:
filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
filename = os.path.join(data_dir, f"{synapse_server_name}.{name}.key")
# if the file already exists, load in the existing value; otherwise,
# generate a new secret and write it to a file
@@ -88,7 +94,7 @@ def generate_config_from_template(
handle.write(value)
environ[secret] = value
environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
environ["SYNAPSE_APPSERVICES"] = glob.glob(os.path.join(data_dir, "appservices", "*.yaml"))
if not os.path.exists(config_dir):
os.mkdir(config_dir)
@@ -111,12 +117,12 @@ def generate_config_from_template(
environ["SYNAPSE_LOG_CONFIG"] = config_dir + "/log.config"
log("Generating synapse config file " + config_path)
convert("/conf/homeserver.yaml", config_path, environ)
convert(os.path.join(template_dir, "homeserver.yaml"), config_path, environ)
log_config_file = environ["SYNAPSE_LOG_CONFIG"]
log("Generating log config file " + log_config_file)
convert(
"/conf/log.config",
os.path.join(template_dir, "log.config"),
log_config_file,
{**environ, "include_worker_name_in_log_line": False},
)
@@ -128,15 +134,15 @@ def generate_config_from_template(
"synapse.app.homeserver",
"--config-path",
config_path,
# tell synapse to put generated keys in /data rather than /compiled
# tell synapse to put generated keys in the data directory rather than /compiled
"--keys-directory",
config_dir,
"--generate-keys",
]
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
subprocess.run(["chown", "-R", ownership, "/data"], check=True)
log(f"Setting ownership on the data dir to {ownership}")
subprocess.run(["chown", "-R", ownership, data_dir], check=True)
args = ["gosu", ownership] + args
subprocess.run(args, check=True)
@@ -159,12 +165,13 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
template_dir = environ.get("SYNAPSE_CONFIG_TEMPLATE_DIR", "/conf")
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
if not os.path.exists(log_config_file):
log("Creating log config %s" % (log_config_file,))
convert("/conf/log.config", log_config_file, environ)
convert(os.path.join(template_dir, "log.config"), log_config_file, environ)
# generate the main config file, and a signing key.
args = [
@@ -216,12 +223,14 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
if mode == "migrate_config":
# generate a config based on environment vars.
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
template_dir = environ.get("SYNAPSE_CONFIG_TEMPLATE_DIR", "/conf")
return generate_config_from_template(
config_dir, config_path, environ, ownership
data_dir, template_dir, config_dir, config_path, environ, ownership
)
if mode != "run":

View File

@@ -54,7 +54,6 @@
- [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
- [Backups](usage/administration/backups.md)
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)

View File

@@ -60,11 +60,10 @@ paginate through.
anything other than the return value of `next_token` from a previous call. Defaults to `0`.
* `dir`: string - Direction of event report order. Whether to fetch the most recent
first (`b`) or the oldest first (`f`). Defaults to `b`.
* `user_id`: optional string - Filter by the user ID of the reporter. This is the user who reported the event
and wrote the reason.
* `room_id`: optional string - Filter by room id.
* `event_sender_user_id`: optional string - Filter by the sender of the reported event. This is the user who
the report was made against.
* `user_id`: string - Is optional and filters to only return users with user IDs that
contain this value. This is the user who reported the event and wrote the reason.
* `room_id`: string - Is optional and filters to only return rooms with room IDs that
contain this value.
**Response**

View File

@@ -5,7 +5,6 @@ basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).

View File

@@ -385,13 +385,6 @@ The API is:
GET /_synapse/admin/v1/rooms/<room_id>/state
```
**Parameters**
The following query parameter is available:
* `type` - The type of room state event to filter by, eg "m.room.create". If provided, only state events
of this type will be returned (regardless of their `state_key` value).
A response body like the following is returned:
```json

View File

@@ -40,7 +40,6 @@ It returns a JSON body like the following:
"erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"last_seen_ts": 1732919539393,
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
@@ -56,8 +55,7 @@ It returns a JSON body like the following:
}
],
"user_type": null,
"locked": false,
"suspended": false
"locked": false
}
```
@@ -478,9 +476,9 @@ with a body of:
}
```
## List joined rooms of a user
## List room memberships of a user
Gets a list of all `room_id` that a specific `user_id` is joined to and is a member of (participating in).
Gets a list of all `room_id` that a specific `user_id` is member.
The API is:
@@ -517,73 +515,6 @@ The following fields are returned in the JSON response body:
- `joined_rooms` - An array of `room_id`.
- `total` - Number of rooms.
## Get the number of invites sent by the user
Fetches the number of invites sent by the provided user ID across all rooms
after the given timestamp.
```
GET /_synapse/admin/v1/users/$user_id/sent_invite_count
```
**Parameters**
The following parameters should be set in the URL:
* `user_id`: fully qualified: for example, `@user:server.com`
The following should be set as query parameters in the URL:
* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only
invites sent at or after the provided timestamp will be returned.
This works by comparing the provided timestamp to the `received_ts`
column in the `events` table.
Note: https://currentmillis.com/ is a useful tool for converting dates
into timestamps and vice versa.
A response body like the following is returned:
```json
{
"invite_count": 30
}
```
_Added in Synapse 1.122.0_
## Get the cumulative number of rooms a user has joined after a given timestamp
Fetches the number of rooms that the user joined after the given timestamp, even
if they have subsequently left/been banned from those rooms.
```
GET /_synapse/admin/v1/users/$<user_id/cumulative_joined_room_count
```
**Parameters**
The following parameters should be set in the URL:
* `user_id`: fully qualified: for example, `@user:server.com`
The following should be set as query parameters in the URL:
* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only
invites sent at or after the provided timestamp will be returned.
This works by comparing the provided timestamp to the `received_ts`
column in the `events` table.
Note: https://currentmillis.com/ is a useful tool for converting dates
into timestamps and vice versa.
A response body like the following is returned:
```json
{
"cumulative_joined_room_count": 30
}
```
_Added in Synapse 1.122.0_
## Account Data
Gets information about account data for a specific `user_id`.
@@ -1434,9 +1365,6 @@ _Added in Synapse 1.72.0._
## Redact all the events of a user
This endpoint allows an admin to redact the events of a given user. There are no restrictions on redactions for a
local user. By default, we puppet the user who sent the message to redact it themselves. Redactions for non-local users are issued using the admin user, and will fail in rooms where the admin user is not admin/does not have the specified power level to issue redactions.
The API is
```
POST /_synapse/admin/v1/user/$user_id/redact
@@ -1512,6 +1440,4 @@ The following fields are returned in the JSON response body:
- `failed_redactions` - dictionary - the keys of the dict are event ids the process was unable to redact, if any, and the values are
the corresponding error that caused the redaction to fail
_Added in Synapse 1.116.0._
_Added in Synapse 1.116.0._

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

View File

@@ -76,9 +76,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```
Called when processing an invitation, both when one is created locally or when
receiving an invite over federation. Both inviter and invitee are represented by
their Matrix user ID (e.g. `@alice:example.com`).
Called when processing an invitation. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
The callback must return one of:
@@ -113,9 +112,7 @@ async def user_may_send_3pid_invite(
```
Called when processing an invitation using a third-party identifier (also called a 3PID,
e.g. an email address or a phone number). It is only called when a 3PID invite is created
locally - not when one is received in a room over federation. If the 3PID is already associated
with a Matrix ID, the spam check will go through the `user_may_invite` callback instead.
e.g. an email address or a phone number).
The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
invitee is represented by its medium (e.g. "email") and its address
@@ -245,7 +242,7 @@ this callback.
_First introduced in Synapse v1.37.0_
```python
async def check_username_for_spam(user_profile: synapse.module_api.UserProfile, requester_id: str) -> bool
async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool
```
Called when computing search results in the user directory. The module must return a
@@ -264,8 +261,6 @@ The profile is represented as a dictionary with the following keys:
The module is given a copy of the original dictionary, so modifying it from within the
module cannot modify a user's profile when included in user directory search results.
The requester_id parameter is the ID of the user that called the user directory API.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call

View File

@@ -336,36 +336,6 @@ but it has a `response_types_supported` which excludes "code" (which we rely on,
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### Forgejo
Forgejo is a fork of Gitea that can act as an OAuth2 provider.
The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`.
Synapse config:
```yaml
oidc_providers:
- idp_id: forgejo
idp_name: Forgejo
discover: false
issuer: "https://your-forgejo.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: ["openid", "profile", "email", "groups"]
authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize"
token_endpoint: "https://your-forgejo.com/login/oauth/access_token"
userinfo_endpoint: "https://your-forgejo.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "sub"
picture_claim: "picture"
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but

View File

@@ -100,10 +100,6 @@ database:
keepalives_count: 3
```
## Backups
Don't forget to [back up](./usage/administration/backups.md#database) your database!
## Tuning Postgres
The default settings should be fine for most deployments. For larger

View File

@@ -74,7 +74,7 @@ server {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host:$server_port;
proxy_set_header Host $host;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml

View File

@@ -157,7 +157,7 @@ sudo pip install py-bcrypt
#### Alpine Linux
Jahway603 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with:
6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with:
```sh
sudo apk add synapse
@@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.9 or later, up to Python 3.13.
- Python 3.8 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are
@@ -656,10 +656,6 @@ This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.
### Backups
Don't forget to take [backups](../usage/administration/backups.md) of your new server!
### Troubleshooting Installation
`pip` seems to leak *lots* of memory during installation. For instance, a Linux

View File

@@ -72,8 +72,8 @@ class ExampleSpamChecker:
async def user_may_publish_room(self, userid, room_id):
return True # allow publishing of all rooms
async def check_username_for_spam(self, user_profile, requester_id):
return False # allow all usernames regardless of requester
async def check_username_for_spam(self, user_profile):
return False # allow all usernames
async def check_registration_for_spam(
self,

View File

@@ -117,59 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
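For example, the status of background updates can be fetched with something like the following (a sketch, assuming the homeserver listens on `localhost:8008` and `<admin_access_token>` belongs to a server admin):

```shell
curl --header "Authorization: Bearer <admin_access_token>" \
    http://localhost:8008/_synapse/admin/v1/background_updates/status
```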
# Upgrading to v1.122.0
## Dropping support for PostgreSQL 11 and 12
In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 11 and 12, as they are no longer supported upstream.
This release of Synapse requires PostgreSQL 13+.
# Upgrading to v1.120.0
## Removal of experimental MSC3886 feature
[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
has been closed (and will not enter the Matrix spec). As such, we are
removing the experimental support for it in this release.
The `experimental_features.msc3886_endpoint` configuration option has
been removed.
## Authenticated media is now enforced by default
The [`enable_authenticated_media`] configuration option now defaults to true.
This means that clients and remote (federated) homeservers now need to use
the authenticated media endpoints in order to download media from your
homeserver.
As an exception, existing media that was stored on the server prior to
this option changing to `true` will still be accessible over the
unauthenticated endpoints.
The matrix.org homeserver has already been running with this option enabled
since September 2024, so most common clients and homeservers should already
be compatible.
With that said, administrators who wish to disable this feature for broader
compatibility can still do so by manually configuring
`enable_authenticated_media: False`.
[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media
# Upgrading to v1.119.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).
If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.
# Upgrading to v1.111.0
## New worker endpoints for authenticated client and federation media

View File

@@ -255,8 +255,6 @@ line to `/etc/default/matrix-synapse`:
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
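As a sketch, the relevant lines in `/etc/default/matrix-synapse` might then look like the following (the jemalloc path varies by distribution and architecture):

```
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
PYTHONMALLOC=malloc
```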
This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

View File

@@ -1,125 +0,0 @@
# How to back up a Synapse homeserver
It is critical to maintain good backups of your server, to guard against
hardware failure as well as potential corruption due to bugs or administrator
error.
This page documents the things you will need to consider backing up as part of
a Synapse installation.
## Configuration files
Keep a copy of your configuration file (`homeserver.yaml`), as well as any
auxiliary config files it refers to such as the
[`log_config`](../configuration/config_documentation.md#log_config) file,
[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files).
Often, all such config files will be kept in a single directory such as
`/etc/synapse`, which will make this easier.
## Server signing key
Your server has a [signing
key](../configuration/config_documentation.md#signing_key_path) which it uses
to sign events and outgoing federation requests. It is easiest to back it up
with your configuration files, but an alternative is to have Synapse create a
new signing key if you have to restore.
If you do decide to replace the signing key, you should add the old *public*
key to
[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys).
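A sketch of such an entry (the key ID, base64 value and expiry timestamp are placeholders):

```yaml
old_signing_keys:
  "ed25519:old_key_id":
    # The base64-encoded *public* part of the retired key.
    key: "TheOldPublicKeyInBase64"
    # When the key stopped being used, in milliseconds since the epoch.
    expired_ts: 1690000000000
```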
## Database
Synapse's support for SQLite is only suitable for testing purposes, so for the
purposes of this document, we'll assume you are using
[PostgreSQL](../../postgres.md).
A full discussion of backup strategies for PostgreSQL is out of scope for this
document; see the [PostgreSQL
documentation](https://www.postgresql.org/docs/current/backup.html) for
detailed information.
### Synapse-specific details
* Be very careful not to restore into a database that already has tables
present. At best, this will error; at worst, it will lead to subtle database
inconsistencies.
* The `e2e_one_time_keys_json` table should **not** be backed up, or if it is
backed up, should be
[`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html)
after restoring the database before Synapse is started.
[Background: restoring the database to an older backup can cause
used one-time-keys to be re-issued, causing subsequent [message decryption
errors](https://github.com/element-hq/element-meta/issues/2155). Clearing
all one-time-keys from the database ensures that this cannot happen, and
will prompt clients to generate and upload new one-time-keys.]
### Quick and easy database backup and restore
Typically, the easiest solution is to use `pg_dump` to take a copy of the whole
database. We recommend `pg_dump`'s custom dump format, as it produces
significantly smaller backup files.
```shell
sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump
```
There is no need to stop Postgres or Synapse while `pg_dump` is running: it
will take a consistent snapshot of the database.
To restore, you will need to recreate the database as described in [Using
Postgres](../../postgres.md#set-up-database),
then load the dump into it with `pg_restore`:
```shell
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres pg_restore -d synapse < synapse.dump
```
(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember
to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before
starting Synapse.)
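For example (a sketch, assuming the database is named `synapse`):

```shell
sudo -u postgres psql synapse -c 'TRUNCATE e2e_one_time_keys_json;'
```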
To reiterate: do **not** restore a dump over an existing database.
Again, if you plan to run your homeserver at any sort of production level, we
recommend studying the PostgreSQL documentation on backup options.
## Media store
Synapse keeps a copy of media uploaded by users, including avatars and message
attachments, in its [Media
store](../configuration/config_documentation.md#media-store).
It is a directory on the local disk, containing the following directories:
* `local_content`: this is content uploaded by your local users. As a general
rule, you should back this up: it may represent the only copy of those
media files anywhere in the federation, and if they are lost, users will
see errors when viewing user or room avatars, and messages with attachments.
* `local_thumbnails`: "thumbnails" of images uploaded by your users. If
[`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails)
is enabled, these will be regenerated if they are removed from the disk, and
there is therefore no need to back them up.
If `dynamic_thumbnails` is *not* enabled (the default): although this can
theoretically be regenerated from `local_content`, there is no tooling to do
so. We recommend that these are backed up too.
* `remote_content`: this is a cache of content that was uploaded by a user on
another server, and has since been requested by a user on your own server.
Typically there is no need to back up this directory: if a file in this directory
is removed, Synapse will attempt to fetch it again from the remote
server.
* `remote_thumbnails`: thumbnails of images uploaded by users on other
servers. As with `remote_content`, there is normally no need to back this
up.
* `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded
by the [URL previews](../../setup/installation.md#url-previews) feature.
These do not need to be backed up.
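As an illustration only (assuming the media store lives at `/var/lib/synapse/media_store`), the directories worth keeping could be copied with something like:

```shell
# Copy only the directories that cannot be regenerated; the caches are skipped.
rsync -a /var/lib/synapse/media_store/local_content \
      /var/lib/synapse/media_store/local_thumbnails \
      /backup/synapse-media/
```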

View File

@@ -673,9 +673,8 @@ This setting has the following sub-options:
TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
Synapse will refuse to connect unless the server supports STARTTLS.
* `enable_tls`: By default, if the server supports TLS, it will be used, and the server
must present a certificate that is valid for `tlsname`. If this option
must present a certificate that is valid for 'smtp_host'. If this option
is set to false, TLS will not be used.
* `tlsname`: The domain name the SMTP server's TLS certificate must be valid for, defaulting to `smtp_host`.
* `notif_from`: defines the "From" address to use when sending emails.
It must be set if email sending is enabled. The placeholder '%(app)s' will be replaced by the application name,
which is normally set in `app_name`, but may be overridden by the
@@ -742,7 +741,6 @@ email:
force_tls: true
require_transport_security: true
enable_tls: false
tlsname: mail.server.example.com
notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
app_name: my_branded_matrix_server
enable_notifs: true
@@ -1436,7 +1434,7 @@ number of entries that can be stored.
Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry
durations.
* `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option.
* `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value
for this option.
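A sketch of how these might be set, assuming they sit under the `cache_autotuning` sub-section of `caches` (values are illustrative):

```yaml
caches:
  cache_autotuning:
    max_cache_memory_usage: 1024M
    target_cache_memory_usage: 758M
    min_cache_ttl: 5m
```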
@@ -1868,27 +1866,6 @@ rc_federation:
concurrent: 5
```
---
### `rc_presence`
This option sets ratelimiting for presence.
The `rc_presence.per_user` option sets rate limits on how often a specific
user's presence updates are evaluated. Ratelimited presence updates sent via sync are
ignored, and no error is returned to the client.
This option also sets the rate limit for the
[`PUT /_matrix/client/v3/presence/{userId}/status`](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3presenceuseridstatus)
endpoint.
`per_user` defaults to `per_second: 0.1`, `burst_count: 1`.
Example configuration:
```yaml
rc_presence:
per_user:
per_second: 0.05
burst_count: 0.5
```
---
### `federation_rr_transactions_per_room_per_second`
Sets outgoing federation transaction frequency for sending read-receipts,
@@ -1910,33 +1887,12 @@ Config options related to Synapse's media store.
When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on.
In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this
case-by-case breakdown describes whether media downloads are permitted:
* `enable_authenticated_media = False`:
* unauthenticated client or homeserver requesting local media: allowed
* unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache,
or as long as the remote homeserver does not require authentication to retrieve the media
* `enable_authenticated_media = True`:
* unauthenticated client or homeserver requesting local media:
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
* unauthenticated client or homeserver requesting remote media: the same as for local media;
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`,
will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`.
This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media;
those older clients or homeservers will not be cut off from media they can already see.
_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`.
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
Example configuration:
```yaml
enable_authenticated_media: false
enable_authenticated_media: true
```
---
### `enable_media_repo`
@@ -3114,22 +3070,6 @@ Example configuration:
```yaml
macaroon_secret_key: <PRIVATE STRING>
```
---
### `macaroon_secret_key_path`
An alternative to [`macaroon_secret_key`](#macaroon_secret_key):
allows the secret key to be specified in an external file.
The file should be a plain text file, containing only the secret key.
Synapse reads the secret key from the given file once at startup.
Example configuration:
```yaml
macaroon_secret_key_path: /path/to/secrets/file
```
_Added in Synapse 1.121.0._
---
### `form_secret`
@@ -3168,15 +3108,6 @@ it was last used.
It is possible to build an entry from an old `signing.key` file using the
`export_signing_key` script which is provided with synapse.
If you have lost the private key file, you can ask another server you trust to
tell you the public keys it has seen from your server. To fetch the keys from
`matrix.org`, try something like:
```
curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
jq '.server_keys | map(.verify_keys) | add'
```
Example configuration:
```yaml
old_signing_keys:
@@ -3791,8 +3722,6 @@ Additional sub-options for this setting include:
Required if `enabled` is set to true.
* `subject_claim`: Name of the claim containing a unique identifier for the user.
Optional, defaults to `sub`.
* `display_name_claim`: Name of the claim containing the display name for the user. Optional.
If provided, the display name will be set to the value of this claim upon first login.
* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the
"iss" claim will be required and validated for all JSON web tokens.
* `audiences`: A list of audiences to validate the "aud" claim against. Optional.
@@ -3807,7 +3736,6 @@ jwt_config:
secret: "provided-by-your-issuer"
algorithm: "provided-by-your-issuer"
subject_claim: "name_of_claim"
display_name_claim: "name_of_claim"
issuer: "provided-by-your-issuer"
audiences:
- "provided-by-your-issuer"
@@ -4442,12 +4370,6 @@ a `federation_sender_instances` map. Doing so will remove handling of this funct
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.
The way that the load balancing works is that any outbound federation request will be assigned
to a federation sender worker based on the hash of the destination server name. This
means that all requests being sent to the same destination will be processed by the same
worker instance. Multiple `federation_sender_instances` are useful when the homeserver
federates with multiple servers.
This configuration setting must be shared between all workers handling federation
sending, and if changed all federation sender workers must be stopped at the same time
and then started, to ensure that all instances are running with the same config (otherwise
@@ -4486,10 +4408,6 @@ instance_map:
worker1:
host: localhost
port: 8034
other:
host: localhost
port: 8035
tls: true
```
Example configuration(#2, for UNIX sockets):
```yaml
@@ -4600,9 +4518,6 @@ This setting has the following sub-options:
* `path`: The full path to a local Unix socket file. **If this is used, `host` and
`port` are ignored.** Defaults to `/tmp/redis.sock`
* `password`: Optional password if configured on the Redis instance.
* `password_path`: Alternative to `password`, reading the password from an
external file. The file should be a plain text file, containing only the
password. Synapse reads the password from the given file once at startup.
* `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
* `use_tls`: Whether to use tls connection. Defaults to false.
* `certificate_file`: Optional path to the certificate file
@@ -4616,16 +4531,13 @@ This setting has the following sub-options:
_Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
_Changed in Synapse 1.116.0: Added password\_path_
Example configuration:
```yaml
redis:
enabled: true
host: localhost
port: 6379
password_path: <path_to_the_password_file>
# OR password: <secret_password>
password: <secret_password>
dbid: <dbid>
#use_tls: True
#certificate_file: <path_to_the_certificate_file>
@@ -4803,7 +4715,7 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match
* `worker_to_run_on`: Which worker to run this module on. This must match
the "worker_name". If not set or `null`, invites will be accepted on the
main process.
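A hedged example, assuming the top-level setting is named `auto_accept_invites` and `worker1` is defined in the instance map:

```yaml
auto_accept_invites:
  enabled: true
  only_for_direct_messages: true
  only_from_local_users: false
  worker_to_run_on: worker1
```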

View File

@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver --config-path [your homeserver.yaml]
poetry run synapse_homeserver --config-file [your homeserver.yaml]
```
For worker setups, you can run the following command
```console
poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml]
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
```
## Available worker applications
@@ -273,6 +273,17 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/knock/
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
# Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags
^/_matrix/client/(r0|v3|unstable)/.*/account_data
# Receipts requests
^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
# Presence requests
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
# User directory search requests
^/_matrix/client/(r0|v3|unstable)/user_directory/search$
@@ -281,13 +292,6 @@ Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
# Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags
^/_matrix/client/(r0|v3|unstable)/.*/account_data
# Presence requests
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests

flake.lock generated
View File

@@ -56,6 +56,24 @@
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
@@ -168,27 +186,27 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1729265718,
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1728538411,
"narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=",
"lastModified": 1681358109,
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221",
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
"type": "github"
},
"original": {
@@ -231,19 +249,20 @@
"devenv": "devenv",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay",
"systems": "systems_2"
"systems": "systems_3"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1731897198,
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
"lastModified": 1693966243,
"narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
"rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
"type": "github"
},
"original": {
@@ -281,6 +300,21 @@
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View File

@@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
@@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
# availabile to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
@@ -39,9 +39,9 @@
{
inputs = {
# Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
@@ -50,7 +50,7 @@
rust-overlay.url = "github:oxalica/rust-overlay";
};
outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
@@ -82,7 +82,7 @@
#
# NOTE: We currently need to set the Rust version unnecessarily high
# in order to work around https://github.com/matrix-org/synapse/issues/15939
(rust-bin.stable."1.82.0".default.override {
(rust-bin.stable."1.71.1".default.override {
# Additionally install the "rust-src" extension to allow diving into the
# Rust source code in an IDE (rust-analyzer will also make use of it).
extensions = [ "rust-src" ];
@@ -126,7 +126,7 @@
# Automatically activate the poetry virtualenv upon entering the shell.
languages.python.poetry.activate.enable = true;
# Install all extra Python dependencies; this is needed to run the unit
# tests and utilise all Synapse features.
# tests and utilitise all Synapse features.
languages.python.poetry.install.arguments = ["--extras all"];
# Install the 'matrix-synapse' package from the local checkout.
languages.python.poetry.install.installRootPackage = true;
@@ -163,8 +163,8 @@
# Create a postgres user called 'synapse_user' which has ownership
# over the 'synapse' database.
services.postgres.initialScript = ''
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
'';
# Redis is needed in order to run Synapse in worker mode.
@@ -205,7 +205,7 @@
# corresponding Nix packages on https://search.nixos.org/packages.
#
# This was done until `./install-deps.pl --dryrun` produced no output.
env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [
env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
DBI
ClassMethodModifiers
CryptEd25519

View File

@@ -26,7 +26,7 @@ strict_equality = True
# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.9
python_version = 3.8
files =
docker/,

poetry.lock generated

File diff suppressed because it is too large

View File

@@ -36,7 +36,7 @@
[tool.ruff]
line-length = 88
target-version = "py39"
target-version = "py38"
[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.124.0"
version = "1.116.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.9.0"
python = "^3.8.0"
# Mandatory Dependencies
# ----------------------
@@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=5.3"
PyYAML = ">=3.13"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
@@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.5.2", optional = true }
lxml = { version = ">=4.2.0", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.7.3"
ruff = "0.6.7"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"]
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@@ -378,19 +378,16 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6, 3.7 and 3.8: EOLed
# - PyPy 3.7 and 3.8: we only support Python 3.9+
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler.
#
# We temporarily pin Rust to 1.82.0 to work around
# https://github.com/element-hq/synapse/issues/17988
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
# For some reason if we don't manually clean the build directory we

View File

@@ -30,14 +30,14 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
pyo3 = { version = "0.23.2", features = [
pyo3 = { version = "0.21.0", features = [
"macros",
"anyhow",
"abi3",
"abi3-py38",
] }
pyo3-log = "0.12.0"
pythonize = "0.23.0"
pyo3-log = "0.10.0"
pythonize = "0.21.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }

View File

@@ -60,7 +60,6 @@ fn bench_match_exact(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@@ -106,7 +105,6 @@ fn bench_match_word(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@@ -152,7 +150,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@@ -198,7 +195,6 @@ fn bench_eval_message(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@@ -209,7 +205,6 @@ fn bench_eval_message(b: &mut Bencher) {
false,
false,
false,
false,
);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));

View File

@@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType};
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "acl")?;
let child_module = PyModule::new_bound(py, "acl")?;
child_module.add_class::<ServerAclEvaluator>()?;
m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import acl` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.acl", child_module)?;

View File

@@ -1,107 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
use std::collections::HashMap;
use pyo3::{exceptions::PyValueError, pyfunction, PyResult};
use crate::{
identifier::UserID,
matrix_const::{
HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN,
},
};
#[pyfunction(name = "event_visible_to_server")]
pub fn event_visible_to_server_py(
sender: String,
target_server_name: String,
history_visibility: String,
erased_senders: HashMap<String, bool>,
partial_state_invisible: bool,
memberships: Vec<(String, String)>, // (state_key, membership)
) -> PyResult<bool> {
event_visible_to_server(
sender,
target_server_name,
history_visibility,
erased_senders,
partial_state_invisible,
memberships,
)
.map_err(|e| PyValueError::new_err(format!("{e}")))
}
/// Return whether the target server is allowed to see the event.
///
/// For a fully stated room, the target server is allowed to see an event E if:
/// - the state at E has world readable or shared history vis, OR
/// - the state at E says that the target server is in the room.
///
/// For a partially stated room, the target server is allowed to see E if:
/// - E was created by this homeserver, AND:
/// - the partial state at E has world readable or shared history vis, OR
/// - the partial state at E says that the target server is in the room.
pub fn event_visible_to_server(
sender: String,
target_server_name: String,
history_visibility: String,
erased_senders: HashMap<String, bool>,
partial_state_invisible: bool,
memberships: Vec<(String, String)>, // (state_key, membership)
) -> anyhow::Result<bool> {
if let Some(&erased) = erased_senders.get(&sender) {
if erased {
return Ok(false);
}
}
if partial_state_invisible {
return Ok(false);
}
if history_visibility != HISTORY_VISIBILITY_INVITED
&& history_visibility != HISTORY_VISIBILITY_JOINED
{
return Ok(true);
}
let mut visible = false;
for (state_key, membership) in memberships {
let state_key = UserID::try_from(state_key.as_ref())
.map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?;
if state_key.server_name() != target_server_name {
return Err(anyhow::anyhow!(
"state_key.server_name ({}) does not match target_server_name ({target_server_name})",
state_key.server_name()
));
}
match membership.as_str() {
MEMBERSHIP_INVITE => {
if history_visibility == HISTORY_VISIBILITY_INVITED {
visible = true;
break;
}
}
MEMBERSHIP_JOIN => {
visible = true;
break;
}
_ => continue,
}
}
Ok(visible)
}

View File

@@ -41,11 +41,9 @@ use pyo3::{
pybacked::PyBackedStr,
pyclass, pymethods,
types::{PyAnyMethods, PyDict, PyDictMethods, PyString},
Bound, IntoPyObject, PyAny, PyObject, PyResult, Python,
Bound, IntoPy, PyAny, PyObject, PyResult, Python,
};
use crate::UnwrapInfallible;
/// Definitions of the various fields of the internal metadata.
#[derive(Clone)]
enum EventInternalMetadataData {
@@ -62,59 +60,31 @@ enum EventInternalMetadataData {
impl EventInternalMetadataData {
/// Convert the field to its name and python object.
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) {
fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) {
match self {
EventInternalMetadataData::OutOfBandMembership(o) => (
pyo3::intern!(py, "out_of_band_membership"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::SendOnBehalfOf(o) => (
pyo3::intern!(py, "send_on_behalf_of"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::RecheckRedaction(o) => (
pyo3::intern!(py, "recheck_redaction"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::SoftFailed(o) => (
pyo3::intern!(py, "soft_failed"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::ProactivelySend(o) => (
pyo3::intern!(py, "proactively_send"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::Redacted(o) => (
pyo3::intern!(py, "redacted"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::TxnId(o) => (
pyo3::intern!(py, "txn_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::TokenId(o) => (
pyo3::intern!(py, "token_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::DeviceId(o) => (
pyo3::intern!(py, "device_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
EventInternalMetadataData::OutOfBandMembership(o) => {
(pyo3::intern!(py, "out_of_band_membership"), o.into_py(py))
}
EventInternalMetadataData::SendOnBehalfOf(o) => {
(pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py))
}
EventInternalMetadataData::RecheckRedaction(o) => {
(pyo3::intern!(py, "recheck_redaction"), o.into_py(py))
}
EventInternalMetadataData::SoftFailed(o) => {
(pyo3::intern!(py, "soft_failed"), o.into_py(py))
}
EventInternalMetadataData::ProactivelySend(o) => {
(pyo3::intern!(py, "proactively_send"), o.into_py(py))
}
EventInternalMetadataData::Redacted(o) => {
(pyo3::intern!(py, "redacted"), o.into_py(py))
}
EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)),
EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)),
EventInternalMetadataData::DeviceId(o) => {
(pyo3::intern!(py, "device_id"), o.into_py(py))
}
}
}
@@ -277,7 +247,7 @@ impl EventInternalMetadata {
///
/// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
let dict = PyDict::new(py);
let dict = PyDict::new_bound(py);
for entry in &self.data {
let (key, value) = entry.to_python_pair(py);

View File

@@ -22,23 +22,21 @@
use pyo3::{
types::{PyAnyMethods, PyModule, PyModuleMethods},
wrap_pyfunction, Bound, PyResult, Python,
Bound, PyResult, Python,
};
pub mod filter;
mod internal_metadata;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "events")?;
let child_module = PyModule::new_bound(py, "events")?;
child_module.add_class::<internal_metadata::EventInternalMetadata>()?;
child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?;
m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import events` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.events", child_module)?;

View File

@@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request
let headers_iter = request
.getattr("requestHeaders")?
.call_method0("getAllRawHeaders")?
.try_iter()?;
.iter()?;
for header in headers_iter {
let header = header?;

View File

@@ -1,252 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
//! # Matrix Identifiers
//!
//! This module contains definitions and utilities for working with matrix identifiers.
use std::{fmt, ops::Deref};
/// Errors that can occur when parsing a matrix identifier.
#[derive(Clone, Debug, PartialEq)]
pub enum IdentifierError {
IncorrectSigil,
MissingColon,
}
impl fmt::Display for IdentifierError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// A Matrix user_id.
#[derive(Clone, Debug, PartialEq)]
pub struct UserID(String);
impl UserID {
/// Returns the `localpart` of the user_id.
pub fn localpart(&self) -> &str {
&self[1..self.colon_pos()]
}
/// Returns the `server_name` / `domain` of the user_id.
pub fn server_name(&self) -> &str {
&self[self.colon_pos() + 1..]
}
/// Returns the position of the ':' inside of the user_id.
/// Used when splitting the user_id into its respective parts.
fn colon_pos(&self) -> usize {
self.find(':').unwrap()
}
}
impl TryFrom<&str> for UserID {
type Error = IdentifierError;
/// Will try creating a `UserID` from the provided `&str`.
/// Can fail if the user_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('@') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(UserID(s.to_string()))
}
}
impl TryFrom<String> for UserID {
type Error = IdentifierError;
/// Will try creating a `UserID` from the provided `String`.
/// Can fail if the user_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('@') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(UserID(s))
}
}
impl<'de> serde::Deserialize<'de> for UserID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
UserID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for UserID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for UserID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// A Matrix room_id.
#[derive(Clone, Debug, PartialEq)]
pub struct RoomID(String);
impl RoomID {
/// Returns the `localpart` of the room_id.
pub fn localpart(&self) -> &str {
&self[1..self.colon_pos()]
}
/// Returns the `server_name` / `domain` of the room_id.
pub fn server_name(&self) -> &str {
&self[self.colon_pos() + 1..]
}
/// Returns the position of the ':' inside of the room_id.
/// Used when splitting the room_id into its respective parts.
fn colon_pos(&self) -> usize {
self.find(':').unwrap()
}
}
impl TryFrom<&str> for RoomID {
type Error = IdentifierError;
/// Will try creating a `RoomID` from the provided `&str`.
/// Can fail if the room_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('!') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(RoomID(s.to_string()))
}
}
impl TryFrom<String> for RoomID {
type Error = IdentifierError;
/// Will try creating a `RoomID` from the provided `String`.
/// Can fail if the room_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('!') {
return Err(IdentifierError::IncorrectSigil);
}
if s.find(':').is_none() {
return Err(IdentifierError::MissingColon);
}
Ok(RoomID(s))
}
}
impl<'de> serde::Deserialize<'de> for RoomID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
RoomID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for RoomID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for RoomID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// A Matrix event_id.
#[derive(Clone, Debug, PartialEq)]
pub struct EventID(String);
impl TryFrom<&str> for EventID {
type Error = IdentifierError;
/// Will try creating an `EventID` from the provided `&str`.
/// Can fail if the event_id is incorrectly formatted.
fn try_from(s: &str) -> Result<Self, Self::Error> {
if !s.starts_with('$') {
return Err(IdentifierError::IncorrectSigil);
}
Ok(EventID(s.to_string()))
}
}
impl TryFrom<String> for EventID {
type Error = IdentifierError;
/// Will try creating an `EventID` from the provided `String`.
/// Can fail if the event_id is incorrectly formatted.
fn try_from(s: String) -> Result<Self, Self::Error> {
if !s.starts_with('$') {
return Err(IdentifierError::IncorrectSigil);
}
Ok(EventID(s))
}
}
impl<'de> serde::Deserialize<'de> for EventID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
EventID::try_from(s).map_err(serde::de::Error::custom)
}
}
impl Deref for EventID {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for EventID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}

View File

@@ -1,5 +1,3 @@
use std::convert::Infallible;
use lazy_static::lazy_static;
use pyo3::prelude::*;
use pyo3_log::ResetHandle;
@@ -8,8 +6,6 @@ pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
pub mod identifier;
pub mod matrix_const;
pub mod push;
pub mod rendezvous;
@@ -54,16 +50,3 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
Ok(())
}
pub trait UnwrapInfallible<T> {
fn unwrap_infallible(self) -> T;
}
impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
fn unwrap_infallible(self) -> T {
match self {
Ok(val) => val,
Err(never) => match never {},
}
}
}

View File

@@ -1,28 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2024 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
//! # Matrix Constants
//!
//! This module contains definitions for constant values described by the matrix specification.
pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable";
pub const HISTORY_VISIBILITY_SHARED: &str = "shared";
pub const HISTORY_VISIBILITY_INVITED: &str = "invited";
pub const HISTORY_VISIBILITY_JOINED: &str = "joined";
pub const MEMBERSHIP_BAN: &str = "ban";
pub const MEMBERSHIP_LEAVE: &str = "leave";
pub const MEMBERSHIP_KNOCK: &str = "knock";
pub const MEMBERSHIP_INVITE: &str = "invite";
pub const MEMBERSHIP_JOIN: &str = "join";

View File

@@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
))]),
actions: Cow::Borrowed(&[Action::Notify]),
default: true,
default_enabled: true,
default_enabled: false,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),

View File

@@ -105,9 +105,6 @@ pub struct PushRuleEvaluator {
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
// If MSC4210 (remove legacy mentions) is enabled.
msc4210_enabled: bool,
}
#[pymethods]
@@ -125,7 +122,6 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc4210_enabled,
))]
pub fn py_new(
flattened_keys: BTreeMap<String, JsonValue>,
@@ -137,7 +133,6 @@ impl PushRuleEvaluator {
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
msc4210_enabled: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
@@ -155,7 +150,6 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc4210_enabled,
})
}
@@ -167,7 +161,6 @@ impl PushRuleEvaluator {
///
/// Returns the set of actions, if any, that match (filtering out any
/// `dont_notify` and `coalesce` actions).
#[pyo3(signature = (push_rules, user_id=None, display_name=None))]
pub fn run(
&self,
push_rules: &FilteredPushRules,
@@ -183,8 +176,7 @@ impl PushRuleEvaluator {
// For backwards-compatibility the legacy mention rules are disabled
// if the event contains the 'm.mentions' property.
// Additionally, MSC4210 always disables the legacy rules.
if (self.has_mentions || self.msc4210_enabled)
if self.has_mentions
&& (rule_id == "global/override/.m.rule.contains_display_name"
|| rule_id == "global/content/.m.rule.contains_user_name"
|| rule_id == "global/override/.m.rule.roomnotif")
@@ -237,7 +229,6 @@ impl PushRuleEvaluator {
}
/// Check if the given condition matches.
#[pyo3(signature = (condition, user_id=None, display_name=None))]
fn matches(
&self,
condition: Condition,
@@ -535,7 +526,6 @@ fn push_rule_evaluator() {
true,
vec![],
true,
false,
)
.unwrap();
@@ -565,7 +555,6 @@ fn test_requires_room_version_supports_condition() {
false,
flags,
true,
false,
)
.unwrap();
@@ -593,7 +582,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
None,
None,
);

View File

@@ -65,8 +65,8 @@ use anyhow::{Context, Error};
use log::warn;
use pyo3::exceptions::PyTypeError;
use pyo3::prelude::*;
use pyo3::types::{PyBool, PyInt, PyList, PyString};
use pythonize::{depythonize, pythonize, PythonizeError};
use pyo3::types::{PyBool, PyList, PyLong, PyString};
use pythonize::{depythonize_bound, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -79,7 +79,7 @@ pub mod utils;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "push")?;
let child_module = PyModule::new_bound(py, "push")?;
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
@@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.push", child_module)?;
@@ -182,16 +182,12 @@ pub enum Action {
Unknown(Value),
}
impl<'py> IntoPyObject<'py> for Action {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PythonizeError;
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
impl IntoPy<PyObject> for Action {
fn into_py(self, py: Python<'_>) -> PyObject {
// When we pass the `Action` struct to Python we want it to be converted
// to a dict. We use `pythonize`, which converts the struct using the
// `serde` serialization.
pythonize(py, &self)
pythonize(py, &self).expect("valid action")
}
}
@@ -274,13 +270,13 @@ pub enum SimpleJsonValue {
}
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = ob.downcast::<PyString>() {
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = ob.downcast::<PyBool>() {
Ok(SimpleJsonValue::Bool(b.extract()?))
} else if let Ok(i) = ob.downcast::<PyInt>() {
} else if let Ok(i) = ob.downcast::<PyLong>() {
Ok(SimpleJsonValue::Int(i.extract()?))
} else if ob.is_none() {
Ok(SimpleJsonValue::Null)
@@ -302,19 +298,15 @@ pub enum JsonValue {
}
impl<'source> FromPyObject<'source> for JsonValue {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(l) = ob.downcast::<PyList>() {
match l
.iter()
.map(|it| SimpleJsonValue::extract_bound(&it))
.collect()
{
match l.iter().map(SimpleJsonValue::extract).collect() {
Ok(a) => Ok(JsonValue::Array(a)),
Err(e) => Err(PyTypeError::new_err(format!(
"Can't convert to JsonValue::Array: {e}"
))),
}
} else if let Ok(v) = SimpleJsonValue::extract_bound(ob) {
} else if let Ok(v) = SimpleJsonValue::extract(ob) {
Ok(JsonValue::Value(v))
} else {
Err(PyTypeError::new_err(format!(
@@ -371,19 +363,15 @@ pub enum KnownCondition {
},
}
impl<'source> IntoPyObject<'source> for Condition {
type Target = PyAny;
type Output = Bound<'source, Self::Target>;
type Error = PythonizeError;
fn into_pyobject(self, py: Python<'source>) -> Result<Self::Output, Self::Error> {
pythonize(py, &self)
impl IntoPy<PyObject> for Condition {
fn into_py(self, py: Python<'_>) -> PyObject {
pythonize(py, &self).expect("valid condition")
}
}
impl<'source> FromPyObject<'source> for Condition {
fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> {
Ok(depythonize(ob)?)
Ok(depythonize_bound(ob.clone())?)
}
}
@@ -546,7 +534,6 @@ pub struct FilteredPushRules {
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc4210_enabled: bool,
}
#[pymethods]
@@ -559,7 +546,6 @@ impl FilteredPushRules {
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc4210_enabled: bool,
) -> Self {
Self {
push_rules,
@@ -568,7 +554,6 @@ impl FilteredPushRules {
msc3381_polls_enabled,
msc3664_enabled,
msc4028_push_encrypted_events,
msc4210_enabled,
}
}
@@ -611,14 +596,6 @@ impl FilteredPushRules {
return false;
}
if self.msc4210_enabled
&& (rule.rule_id == "global/override/.m.rule.contains_display_name"
|| rule.rule_id == "global/content/.m.rule.contains_user_name"
|| rule.rule_id == "global/override/.m.rule.roomnotif")
{
return false;
}
true
})
.map(|r| {

View File

@@ -23,6 +23,7 @@ use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
use regex;
use regex::Regex;
use regex::RegexBuilder;

View File

@@ -29,7 +29,7 @@ use pyo3::{
exceptions::PyValueError,
pyclass, pymethods,
types::{PyAnyMethods, PyModule, PyModuleMethods},
Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python,
Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject,
};
use ulid::Ulid;
@@ -37,7 +37,6 @@ use self::session::Session;
use crate::{
errors::{NotFoundError, SynapseError},
http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
UnwrapInfallible,
};
mod session;
@@ -126,11 +125,7 @@ impl RendezvousHandler {
let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
.map_err(|_| PyValueError::new_err("Invalid base URI"))?;
let clock = homeserver
.call_method0("get_clock")?
.into_pyobject(py)
.unwrap_infallible()
.unbind();
let clock = homeserver.call_method0("get_clock")?.to_object(py);
// Construct a Python object so that we can get a reference to the
// evict method and schedule it to run.
@@ -293,13 +288,6 @@ impl RendezvousHandler {
let mut response = Response::new(Bytes::new());
*response.status_mut() = StatusCode::ACCEPTED;
prepare_headers(response.headers_mut(), session);
// Even though this isn't mandated by the MSC, we set a Content-Type on the response. It
// doesn't do any harm as the body is empty, but this helps escape a bug in some reverse
// proxy/cache setup which strips the ETag header if there is no Content-Type set.
// Specifically, we noticed this behaviour when placing Synapse behind Cloudflare.
response.headers_mut().typed_insert(ContentType::text());
http_response_to_twisted(twisted_request, response)?;
Ok(())
@@ -323,7 +311,7 @@ impl RendezvousHandler {
}
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module = PyModule::new(py, "rendezvous")?;
let child_module = PyModule::new_bound(py, "rendezvous")?;
child_module.add_class::<RendezvousHandler>()?;
@@ -331,7 +319,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()>
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import rendezvous` work.
py.import("sys")?
py.import_bound("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.rendezvous", child_module)?;

View File

@@ -28,11 +28,12 @@ from typing import Collection, Optional, Sequence, Set
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (rolling distro, no EOL)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
"ubuntu:oracular", # 24.10 (EOL 2025-07)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:trixie", # (EOL not specified yet)
)

View File

@@ -195,10 +195,6 @@ if [ -z "$skip_docker_build" ]; then
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"

View File

@@ -360,7 +360,7 @@ def is_cacheable(
# For a type alias, check if the underlying real type is cachable.
return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)
elif isinstance(rt, UninhabitedType):
elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
# There is no return value, just consider it cachable. This is only used
# in tests.
return True, None

View File

@@ -40,7 +40,7 @@ import commonmark
import git
from click.exceptions import ClickException
from git import GitCommandError, Repo
from github import BadCredentialsException, Github
from github import Github
from packaging import version
@@ -323,8 +323,10 @@ def tag(gh_token: Optional[str]) -> None:
def _tag(gh_token: Optional[str]) -> None:
"""Tags the release and generates a draft GitHub release"""
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
# Make sure we're in a git repo.
repo = get_repo_and_check_clean_checkout()
@@ -467,8 +469,10 @@ def upload(gh_token: Optional[str]) -> None:
def _upload(gh_token: Optional[str]) -> None:
"""Upload release to pypi."""
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
current_version = get_package_version()
tag_name = f"v{current_version}"
@@ -565,8 +569,10 @@ def wait_for_actions(gh_token: Optional[str]) -> None:
def _wait_for_actions(gh_token: Optional[str]) -> None:
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
# Find out the version and tag name.
current_version = get_package_version()
@@ -800,22 +806,6 @@ def get_repo_and_check_clean_checkout(
return repo
def check_valid_gh_token(gh_token: Optional[str]) -> None:
"""Check that a github token is valid, if supplied"""
if not gh_token:
# No github token supplied, so nothing to do.
return
try:
gh = Github(gh_token)
# We need to lookup name to trigger a request.
_name = gh.get_user().name
except BadCredentialsException as e:
raise click.ClickException(f"Github credentials are bad: {e}")
def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
"""Find the branch/ref, looking first locally then in the remote."""
if ref_name in repo.references:

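A rough, self-contained sketch of the token-validation helper shown above; the environment variable name is just an example, PyGithub must be installed, and SystemExit stands in for the release script's ClickException:

import os

from github import BadCredentialsException, Github

def check_token(gh_token):
    """Fail fast if a supplied GitHub token is invalid (mirrors the helper above)."""
    if not gh_token:
        return  # no token supplied, nothing to validate
    try:
        # Accessing .name forces an authenticated API request.
        _ = Github(gh_token).get_user().name
    except BadCredentialsException as e:
        # (the release script raises a ClickException here instead)
        raise SystemExit(f"Github credentials are bad: {e}")

check_token(os.environ.get("GH_TOKEN"))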
View File

@@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 9):
print("Synapse requires Python 3.9 or above.")
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.

View File

@@ -88,7 +88,6 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.user_directory import (
@@ -256,7 +255,6 @@ class Store(
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
EventFederationWorkerStore,
SlidingSyncStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

View File

@@ -174,12 +174,6 @@ class MSC3861DelegatedAuth(BaseAuth):
logger.warning("Failed to load metadata:", exc_info=True)
return None
async def auth_metadata(self) -> Dict[str, Any]:
"""
Returns the auth metadata dict
"""
return await self._issuer_metadata.get()
async def _introspection_endpoint(self) -> str:
"""
Returns the introspection endpoint of the issuer
@@ -344,7 +338,7 @@ class MSC3861DelegatedAuth(BaseAuth):
logger.exception("Failed to introspect token")
raise SynapseError(503, "Unable to introspect the access token")
logger.debug("Introspection result: %r", introspection_result)
logger.info(f"Introspection result: {introspection_result!r}")
# TODO: introspection verification should be more extensive, especially:
# - verify the audience

View File

@@ -231,8 +231,6 @@ class EventContentFields:
ROOM_NAME: Final = "name"
MEMBERSHIP: Final = "membership"
MEMBERSHIP_DISPLAYNAME: Final = "displayname"
MEMBERSHIP_AVATAR_URL: Final = "avatar_url"
# Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access"
@@ -320,8 +318,3 @@ class ApprovalNoticeMedium:
class Direction(enum.Enum):
BACKWARDS = "b"
FORWARDS = "f"
class ProfileFields:
DISPLAYNAME: Final = "displayname"
AVATAR_URL: Final = "avatar_url"

View File

@@ -87,7 +87,8 @@ class Codes(str, Enum):
WEAK_PASSWORD = "M_WEAK_PASSWORD"
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
USER_LOCKED = "M_USER_LOCKED"
# USER_LOCKED = "M_USER_LOCKED"
USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"
NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED"
CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA"
@@ -100,9 +101,8 @@ class Codes(str, Enum):
# The account has been suspended on the server.
# By opposition to `USER_DEACTIVATED`, this is a reversible measure
# that can possibly be appealed and reverted.
# Introduced by MSC3823
# https://github.com/matrix-org/matrix-spec-proposals/pull/3823
USER_ACCOUNT_SUSPENDED = "M_USER_SUSPENDED"
# Part of MSC3823.
USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
BAD_ALIAS = "M_BAD_ALIAS"
# For restricted join rules.
@@ -132,10 +132,6 @@ class Codes(str, Enum):
# connection.
UNKNOWN_POS = "M_UNKNOWN_POS"
# Part of MSC4133
PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
KEY_TOO_LARGE = "M_KEY_TOO_LARGE"
class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers.

View File

@@ -275,7 +275,6 @@ class Ratelimiter:
update: bool = True,
n_actions: int = 1,
_time_now_s: Optional[float] = None,
pause: Optional[float] = 0.5,
) -> None:
"""Checks if an action can be performed. If not, raises a LimitExceededError
@@ -299,8 +298,6 @@ class Ratelimiter:
at all.
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.
pause: Time in seconds to pause when an action is being limited. Defaults to 0.5
to stop clients from "tight-looping" on retrying their request.
Raises:
LimitExceededError: If an action could not be performed, along with the time in
@@ -319,8 +316,9 @@ class Ratelimiter:
)
if not allowed:
if pause:
await self.clock.sleep(pause)
# We pause for a bit here to stop clients from "tight-looping" on
# retrying their request.
await self.clock.sleep(0.5)
raise LimitExceededError(
limiter_name=self._limiter_name,

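A minimal sketch of the pause-before-raising idea discussed above, written as generic asyncio code rather than Synapse's Ratelimiter API:

import asyncio
from typing import Optional

class LimitExceeded(Exception):
    pass

async def check_allowed(allowed: bool, pause: Optional[float] = 0.5) -> None:
    if not allowed:
        if pause:
            # Hold the rejected request open briefly so clients cannot
            # tight-loop on retries.
            await asyncio.sleep(pause)
        raise LimitExceeded()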
View File

@@ -23,8 +23,7 @@
import hmac
from hashlib import sha256
from typing import Optional
from urllib.parse import urlencode, urljoin
from urllib.parse import urlencode
from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig
@@ -67,42 +66,3 @@ class ConsentURIBuilder:
urlencode({"u": user_id, "h": mac}),
)
return consent_uri
class LoginSSORedirectURIBuilder:
def __init__(self, hs_config: HomeServerConfig):
self._public_baseurl = hs_config.server.public_baseurl
def build_login_sso_redirect_uri(
self, *, idp_id: Optional[str], client_redirect_url: str
) -> str:
"""Build a `/login/sso/redirect` URI for the given identity provider.
Builds `/_matrix/client/v3/login/sso/redirect/{idpId}?redirectUrl=xxx` when `idp_id` is specified.
Otherwise, builds `/_matrix/client/v3/login/sso/redirect?redirectUrl=xxx` when `idp_id` is `None`.
Args:
idp_id: Optional ID of the identity provider
client_redirect_url: URL to redirect the user to after login
Returns:
The URI to follow when choosing a specific identity provider.
"""
base_url = urljoin(
self._public_baseurl,
f"{CLIENT_API_PREFIX}/v3/login/sso/redirect",
)
serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url})
if idp_id:
resultant_url = urljoin(
# We have to add a trailing slash to the base URL to ensure that the
# last path segment is not stripped away when joining with another path.
f"{base_url}/",
f"{idp_id}?{serialized_query_parameters}",
)
else:
resultant_url = f"{base_url}?{serialized_query_parameters}"
return resultant_url

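For illustration, with an assumed public_baseurl of https://matrix.example.com/ the builder above produces URIs along these lines (hostnames and the idp_id are made up):

from urllib.parse import urlencode, urljoin

base = urljoin("https://matrix.example.com/", "_matrix/client/v3/login/sso/redirect")
query = urlencode({"redirectUrl": "https://app.example.com/callback"})

# Without an idp_id:
#   https://matrix.example.com/_matrix/client/v3/login/sso/redirect?redirectUrl=https%3A%2F%2Fapp.example.com%2Fcallback
print(f"{base}?{query}")

# With idp_id="oidc-github" (note the trailing slash added before joining):
#   https://matrix.example.com/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=...
print(urljoin(f"{base}/", f"oidc-github?{query}"))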
View File

@@ -3,7 +3,7 @@
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
# Copyright 2016 OpenMarket Ltd
# Copyright (C) 2023-2024 New Vector, Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as

View File

@@ -87,7 +87,6 @@ class ApplicationService:
ip_range_whitelist: Optional[IPSet] = None,
supports_ephemeral: bool = False,
msc3202_transaction_extensions: bool = False,
msc4190_device_management: bool = False,
):
self.token = token
self.url = (
@@ -101,7 +100,6 @@ class ApplicationService:
self.ip_range_whitelist = ip_range_whitelist
self.supports_ephemeral = supports_ephemeral
self.msc3202_transaction_extensions = msc3202_transaction_extensions
self.msc4190_device_management = msc4190_device_management
if "|" in self.id:
raise Exception("application service ID cannot contain '|' character")

View File

@@ -221,13 +221,9 @@ class Config:
The number of milliseconds in the duration.
Raises:
TypeError: if given something other than an integer or a string, or the
duration is using an incorrect suffix.
TypeError, if given something other than an integer or a string
ValueError: if given a string not of the form described above.
"""
# For integers, we prefer to use `type(value) is int` instead of
# `isinstance(value, int)` because we want to exclude subclasses of int, such as
# bool.
if type(value) is int: # noqa: E721
return value
elif isinstance(value, str):
@@ -250,20 +246,9 @@ class Config:
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
elif suffix.isdigit():
# No suffix is treated as milliseconds.
value = value
size = 1
else:
raise TypeError(
f"Bad duration suffix {value} (expected no suffix or one of these suffixes: {sizes.keys()})"
)
return int(value) * size
else:
raise TypeError(
f"Bad duration type {value!r} (expected int or string duration)"
)
raise TypeError(f"Bad duration {value!r}")
@staticmethod
def abspath(file_path: str) -> str:

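A condensed, standalone sketch of the duration parsing shown above; the suffix table is abbreviated and the helper name is illustrative:

SIZES = {"s": 1000, "m": 60 * 1000, "h": 60 * 60 * 1000, "d": 24 * 60 * 60 * 1000}

def parse_duration_ms(value):
    # Plain ints (but not bools) are already milliseconds.
    if type(value) is int:
        return value
    if isinstance(value, str):
        suffix = value[-1:]
        if suffix in SIZES:
            return int(value[:-1]) * SIZES[suffix]
        if value.isdigit():
            return int(value)  # no suffix: treat as milliseconds
        raise TypeError(f"Bad duration suffix {value!r}")
    raise TypeError(f"Bad duration type {value!r}")

assert parse_duration_ms("2s") == 2000
assert parse_duration_ms(1500) == 1500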
View File

@@ -183,18 +183,6 @@ def _load_appservice(
"The `org.matrix.msc3202` option should be true or false if specified."
)
# Opt-in flag for the MSC4190 behaviours.
# When enabled, the following C-S API endpoints change for appservices:
# - POST /register does not return an access token
# - PUT /devices/{device_id} creates a new device if one does not exist
# - DELETE /devices/{device_id} no longer requires UIA
# - POST /delete_devices/{device_id} no longer requires UIA
msc4190_enabled = as_info.get("io.element.msc4190", False)
if not isinstance(msc4190_enabled, bool):
raise ValueError(
"The `io.element.msc4190` option should be true or false if specified."
)
return ApplicationService(
token=as_info["as_token"],
url=as_info["url"],
@@ -207,5 +195,4 @@ def _load_appservice(
ip_range_whitelist=ip_range_whitelist,
supports_ephemeral=supports_ephemeral,
msc3202_transaction_extensions=msc3202_transaction_extensions,
msc4190_device_management=msc4190_enabled,
)

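For context, a hypothetical appservice registration (expressed as the parsed Python dict the loader works on) showing where the io.element.msc4190 opt-in sits; every value here is a placeholder:

as_info = {
    "id": "examplebridge",
    "url": "http://localhost:9000",
    "as_token": "<as token>",
    "hs_token": "<hs token>",
    "sender_localpart": "examplebot",
    "namespaces": {"users": [], "aliases": [], "rooms": []},
    # Opt in to the MSC4190 appservice device-management behaviours.
    "io.element.msc4190": True,
}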
View File

@@ -20,7 +20,7 @@
#
#
from typing import Any, List, Optional
from typing import Any, List
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
@@ -46,9 +46,7 @@ class CasConfig(Config):
# TODO Update this to a _synapse URL.
public_baseurl = self.root.server.public_baseurl
self.cas_service_url: Optional[str] = (
public_baseurl + "_matrix/client/r0/login/cas/ticket"
)
self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
self.cas_protocol_version = cas_config.get("protocol_version")
if (

View File

@@ -110,7 +110,6 @@ class EmailConfig(Config):
raise ConfigError(
"email.require_transport_security requires email.enable_tls to be true"
)
self.email_tlsname = email_config.get("tlsname", None)
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]

View File

@@ -365,6 +365,11 @@ class ExperimentalConfig(Config):
# MSC3874: Filtering /messages with rel_types / not_rel_types.
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
# MSC3886: Simple client rendezvous capability
self.msc3886_endpoint: Optional[str] = experimental.get(
"msc3886_endpoint", None
)
# MSC3890: Remotely silence local notifications
# Note: This option requires "experimental_features.msc3391_enabled" to be
# set to "true", in order to communicate account data deletions to clients.
@@ -436,14 +441,9 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
# MSC4133: Custom profile fields
self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False)
self.msc3823_account_suspension = experimental.get(
"msc3823_account_suspension", False
)
# MSC4210: Remove legacy mentions
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
# MSC4222: Adding `state_after` to sync v2
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
# MSC4076: Add `disable_badge_count`` to pusher configuration
self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)
# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)

View File

@@ -38,7 +38,6 @@ class JWTConfig(Config):
self.jwt_algorithm = jwt_config["algorithm"]
self.jwt_subject_claim = jwt_config.get("subject_claim", "sub")
self.jwt_display_name_claim = jwt_config.get("display_name_claim")
# The issuer and audiences are optional, if provided, it is asserted
# that the claims exist on the JWT.
@@ -50,6 +49,5 @@ class JWTConfig(Config):
self.jwt_secret = None
self.jwt_algorithm = None
self.jwt_subject_claim = None
self.jwt_display_name_claim = None
self.jwt_issuer = None
self.jwt_audiences = None

View File

@@ -43,7 +43,7 @@ from unpaddedbase64 import decode_base64
from synapse.types import JsonDict
from synapse.util.stringutils import random_string, random_string_with_symbols
from ._base import Config, ConfigError, read_file
from ._base import Config, ConfigError
if TYPE_CHECKING:
from signedjson.key import VerifyKeyWithExpiry
@@ -91,11 +91,6 @@ To suppress this warning and continue using 'matrix.org', admins should set
'suppress_key_server_warning' to 'true' in homeserver.yaml.
--------------------------------------------------------------------------------"""
CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR = """\
Conflicting options 'macaroon_secret_key' and 'macaroon_secret_key_path' are
both defined in config file.
"""
logger = logging.getLogger(__name__)
@@ -171,16 +166,10 @@ class KeyConfig(Config):
)
)
macaroon_secret_key = config.get("macaroon_secret_key")
macaroon_secret_key_path = config.get("macaroon_secret_key_path")
if macaroon_secret_key_path:
if macaroon_secret_key:
raise ConfigError(CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR)
macaroon_secret_key = read_file(
macaroon_secret_key_path, "macaroon_secret_key_path"
).strip()
if not macaroon_secret_key:
macaroon_secret_key = self.root.registration.registration_shared_secret
macaroon_secret_key: Optional[str] = config.get(
"macaroon_secret_key", self.root.registration.registration_shared_secret
)
if not macaroon_secret_key:
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.

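A generic sketch of the either/or secret-loading pattern used above, in plain Python rather than the Config helpers:

def load_macaroon_secret(config: dict):
    secret = config.get("macaroon_secret_key")
    path = config.get("macaroon_secret_key_path")
    if path:
        if secret:
            raise ValueError(
                "macaroon_secret_key and macaroon_secret_key_path are mutually exclusive"
            )
        with open(path) as f:
            secret = f.read().strip()
    return secret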
View File

@@ -360,6 +360,5 @@ def setup_logging(
"Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Public Base URL: %s", config.server.public_baseurl)
logging.info("Instance name: %s", hs.get_instance_name())
logging.info("Twisted reactor: %s", type(reactor).__name__)

View File

@@ -228,9 +228,3 @@ class RatelimitConfig(Config):
config.get("remote_media_download_burst_count", "500M")
),
)
self.rc_presence_per_user = RatelimitSettings.parse(
config,
"rc_presence.per_user",
defaults={"per_second": 0.1, "burst_count": 1},
)

View File

@@ -21,15 +21,10 @@
from typing import Any
from synapse.config._base import Config, ConfigError, read_file
from synapse.config._base import Config
from synapse.types import JsonDict
from synapse.util.check_dependencies import check_requirements
CONFLICTING_PASSWORD_OPTS_ERROR = """\
You have configured both `redis.password` and `redis.password_path`.
These are mutually incompatible.
"""
class RedisConfig(Config):
section = "redis"
@@ -48,17 +43,6 @@ class RedisConfig(Config):
self.redis_path = redis_config.get("path", None)
self.redis_dbid = redis_config.get("dbid", None)
self.redis_password = redis_config.get("password")
redis_password_path = redis_config.get("password_path")
if redis_password_path:
if self.redis_password:
raise ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR)
self.redis_password = read_file(
redis_password_path,
(
"redis",
"password_path",
),
).strip()
self.redis_use_tls = redis_config.get("use_tls", False)
self.redis_certificate = redis_config.get("certificate_file", None)

View File

@@ -22,7 +22,7 @@
import logging
import os
from typing import Any, Dict, List, Tuple
from urllib.request import getproxies_environment
from urllib.request import getproxies_environment # type: ignore
import attr
@@ -272,7 +272,9 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)
self.enable_authenticated_media = config.get("enable_authenticated_media", True)
self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None

View File

@@ -215,6 +215,9 @@ class HttpListenerConfig:
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
# If true, the listener will return CORS response headers compatible with MSC3886:
# https://github.com/matrix-org/matrix-spec-proposals/pull/3886
experimental_cors_msc3886: bool = False
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -332,14 +335,8 @@ class ServerConfig(Config):
logger.info("Using default public_baseurl %s", public_baseurl)
else:
self.serve_client_wellknown = True
# Ensure that public_baseurl ends with a trailing slash
if public_baseurl[-1] != "/":
public_baseurl += "/"
# Scrutinize user-provided config
if not isinstance(public_baseurl, str):
raise ConfigError("Must be a string", ("public_baseurl",))
self.public_baseurl = public_baseurl
# check that public_baseurl is valid
@@ -1007,6 +1004,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
if socket_path:

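One reason the trailing slash on public_baseurl matters is how later path joins behave; a quick illustration with urljoin (hostnames are examples):

from urllib.parse import urljoin

# Without the trailing slash the last path segment is replaced...
assert urljoin("https://matrix.example.com/synapse", "client") == "https://matrix.example.com/client"
# ...with it, the new segment is appended underneath.
assert urljoin("https://matrix.example.com/synapse/", "client") == "https://matrix.example.com/synapse/client"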
View File

@@ -566,7 +566,6 @@ def _is_membership_change_allowed(
logger.debug(
"_is_membership_change_allowed: %s",
{
"caller_membership": caller.membership if caller else None,
"caller_in_room": caller_in_room,
"caller_invited": caller_invited,
"caller_knocked": caller_knocked,
@@ -678,8 +677,7 @@ def _is_membership_change_allowed(
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
# You can only join the room if you are invited or are already in the room.
if not (caller_in_room or caller_invited):
if not caller_in_room and not caller_invited:
raise AuthError(403, "You are not invited to this room.")
else:
# TODO (erikj): may_join list

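The two join guards in the hunk above are equivalent by De Morgan's laws, which an exhaustive check over both booleans confirms:

for caller_in_room in (False, True):
    for caller_invited in (False, True):
        assert (not (caller_in_room or caller_invited)) == (
            not caller_in_room and not caller_invited
        )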
View File

@@ -42,7 +42,7 @@ import attr
from typing_extensions import Literal
from unpaddedbase64 import encode_base64
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.constants import RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.synapse_rust.events import EventInternalMetadata
from synapse.types import JsonDict, StrCollection
@@ -325,17 +325,12 @@ class EventBase(metaclass=abc.ABCMeta):
def __repr__(self) -> str:
rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
conditional_membership_string = ""
if self.get("type") == EventTypes.Member:
conditional_membership_string = f"membership={self.membership}, "
return (
f"<{self.__class__.__name__} "
f"{rejection}"
f"event_id={self.event_id}, "
f"type={self.get('type')}, "
f"state_key={self.get('state_key')}, "
f"{conditional_membership_string}"
f"outlier={self.internal_metadata.is_outlier()}"
">"
)

View File

@@ -66,67 +66,50 @@ class InviteAutoAccepter:
event: The incoming event.
"""
# Check if the event is an invite for a local user.
if (
event.type != EventTypes.Member
or event.is_state() is False
or event.membership != Membership.INVITE
or self._api.is_mine(event.state_key) is False
):
return
is_invite_for_local_user = (
event.type == EventTypes.Member
and event.is_state()
and event.membership == Membership.INVITE
and self._api.is_mine(event.state_key)
)
# Only accept invites for direct messages if the configuration mandates it.
is_direct_message = event.content.get("is_direct", False)
if (
self._config.accept_invites_only_for_direct_messages
and is_direct_message is False
):
return
is_allowed_by_direct_message_rules = (
not self._config.accept_invites_only_for_direct_messages
or is_direct_message is True
)
# Only accept invites from remote users if the configuration mandates it.
is_from_local_user = self._api.is_mine(event.sender)
if (
self._config.accept_invites_only_from_local_users
and is_from_local_user is False
):
return
# Check the user is activated.
recipient = await self._api.get_userinfo_by_id(event.state_key)
# Ignore if the user doesn't exist.
if recipient is None:
return
# Never accept invites for deactivated users.
if recipient.is_deactivated:
return
# Never accept invites for suspended users.
if recipient.suspended:
return
# Never accept invites for locked users.
if recipient.locked:
return
# Make the user join the room. We run this as a background process to circumvent a race condition
# that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
run_as_background_process(
"retry_make_join",
self._retry_make_join,
event.state_key,
event.state_key,
event.room_id,
"join",
bg_start_span=False,
is_allowed_by_local_user_rules = (
not self._config.accept_invites_only_from_local_users
or is_from_local_user is True
)
if is_direct_message:
# Mark this room as a direct message!
await self._mark_room_as_direct_message(
event.state_key, event.sender, event.room_id
if (
is_invite_for_local_user
and is_allowed_by_direct_message_rules
and is_allowed_by_local_user_rules
):
# Make the user join the room. We run this as a background process to circumvent a race condition
# that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
run_as_background_process(
"retry_make_join",
self._retry_make_join,
event.state_key,
event.state_key,
event.room_id,
"join",
bg_start_span=False,
)
if is_direct_message:
# Mark this room as a direct message!
await self._mark_room_as_direct_message(
event.state_key, event.sender, event.room_id
)
async def _mark_room_as_direct_message(
self, user_id: str, dm_user_id: str, room_id: str
) -> None:

View File

@@ -24,7 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import attr
from signedjson.types import SigningKey
from synapse.api.constants import MAX_DEPTH, EventTypes
from synapse.api.constants import MAX_DEPTH
from synapse.api.room_versions import (
KNOWN_EVENT_FORMAT_VERSIONS,
EventFormatVersions,
@@ -109,19 +109,6 @@ class EventBuilder:
def is_state(self) -> bool:
return self._state_key is not None
def is_mine_id(self, user_id: str) -> bool:
"""Determines whether a user ID or room alias originates from this homeserver.
Returns:
`True` if the hostname part of the user ID or room alias matches this
homeserver.
`False` otherwise, or if the user ID or room alias is malformed.
"""
localpart_hostname = user_id.split(":", 1)
if len(localpart_hostname) < 2:
return False
return localpart_hostname[1] == self._hostname
async def build(
self,
prev_event_ids: List[str],
@@ -155,46 +142,6 @@ class EventBuilder:
self, state_ids
)
# Check for out-of-band membership that may have been exposed on `/sync` but
# the events have not been de-outliered yet so they won't be part of the
# room state yet.
#
# This helps in situations where a remote homeserver invites a local user to
# a room that we're already participating in; and we've persisted the invite
# as an out-of-band membership (outlier), but it hasn't been pushed to us as
# part of a `/send` transaction yet and de-outliered. This also helps for
# any of the other out-of-band membership transitions.
#
# As an optimization, we could check if the room state already includes a
# non-`leave` membership event, then we can assume the membership event has
# been de-outliered and we don't need to check for an out-of-band
# membership. But we don't have the necessary information from a
# `StateMap[str]` and we'll just have to take the hit of this extra lookup
# for any membership event for now.
if self.type == EventTypes.Member and self.is_mine_id(self.state_key):
(
_membership,
member_event_id,
) = await self._store.get_local_current_membership_for_user_in_room(
user_id=self.state_key,
room_id=self.room_id,
)
# There is no need to check if the membership is actually an
# out-of-band membership (`outlier`) as we would end up with the
# same result either way (adding the member event to the
# `auth_event_ids`).
if (
member_event_id is not None
# We only need to be careful about duplicating the event in the
# `auth_event_ids` list (duplicate `type`/`state_key` is part of the
# authorization rules)
and member_event_id not in auth_event_ids
):
auth_event_ids.append(member_event_id)
# Also make sure to point to the previous membership event that will
# allow this one to happen so the computed state works out.
prev_event_ids.append(member_event_id)
format_version = self.room_version.event_format
# The types of auth/prev events changes between event versions.
prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]]

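The is_mine_id helper in the hunk above relies on a single split at the first colon; a small illustration with made-up identifiers (the helper name here is not Synapse's):

def server_part(user_id: str):
    parts = user_id.split(":", 1)
    return parts[1] if len(parts) == 2 else None

assert server_part("@alice:example.com") == "example.com"
# maxsplit=1 keeps ports (and any later colons) intact.
assert server_part("#room:example.com:8448") == "example.com:8448"
assert server_part("malformed-id") is None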
View File

@@ -248,7 +248,7 @@ class EventContext(UnpersistedEventContextBase):
@tag_args
async def get_current_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
) -> Optional[StateMap[str]]:
"""
Gets the room state map, including this event - ie, the state in ``state_group``
@@ -256,12 +256,13 @@ class EventContext(UnpersistedEventContextBase):
not make it into the room state. This method will raise an exception if
``rejected`` is set.
It is also an error to access this for an outlier event.
Args:
state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules
Returns:
Returns None if state_group is None, which happens when the associated
event is an outlier.
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
@@ -299,8 +300,7 @@ class EventContext(UnpersistedEventContextBase):
this tuple.
"""
if self.state_group_before_event is None:
return {}
assert self.state_group_before_event is not None
return await self._storage.state.get_state_ids_for_group(
self.state_group_before_event, state_filter
)

View File

@@ -140,6 +140,7 @@ from typing import (
Iterable,
List,
Optional,
Set,
Tuple,
)
@@ -169,13 +170,7 @@ from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import (
JsonDict,
ReadReceipt,
RoomStreamToken,
StrCollection,
get_domain_from_id,
)
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
from synapse.util import Clock
from synapse.util.metrics import Measure
from synapse.util.retryutils import filter_destinations_by_retry_limiter
@@ -302,10 +297,12 @@ class _DestinationWakeupQueue:
# being woken up.
_MAX_TIME_IN_QUEUE = 30.0
# The maximum duration in seconds between waking up consecutive destination
# queues.
_MAX_DELAY = 0.1
sender: "FederationSender" = attr.ib()
clock: Clock = attr.ib()
max_delay_s: int = attr.ib()
queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
processing: bool = attr.ib(default=False)
@@ -335,7 +332,7 @@ class _DestinationWakeupQueue:
# We also add an upper bound to the delay, to gracefully handle the
# case where the queue only has a few entries in it.
current_sleep_seconds = min(
self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue)
self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
)
while self.queue:
@@ -419,15 +416,20 @@ class FederationSender(AbstractFederationSender):
self._is_processing = False
self._last_poked_id = -1
self._external_cache = hs.get_external_cache()
# map from room_id to a set of PerDestinationQueues which we believe are
# awaiting a call to flush_read_receipts_for_room. The presence of an entry
# here for a given room means that we are rate-limiting RR flushes to that room,
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
rr_txn_interval_per_room_s = (
1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._destination_wakeup_queue = _DestinationWakeupQueue(
self, self.clock, max_delay_s=rr_txn_interval_per_room_s
self._rr_txn_interval_per_room_ms = (
1000.0
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._external_cache = hs.get_external_cache()
self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call_now(
run_as_background_process,
@@ -743,48 +745,37 @@ class FederationSender(AbstractFederationSender):
# Some background on the rate-limiting going on here.
#
# It turns out that if we attempt to send out RRs as soon as we get them
# from a client, then we end up trying to do several hundred Hz of
# federation transactions. (The number of transactions scales as O(N^2)
# on the size of a room, since in a large room we have both more RRs
# coming in, and more servers to send them to.)
# It turns out that if we attempt to send out RRs as soon as we get them from
# a client, then we end up trying to do several hundred Hz of federation
# transactions. (The number of transactions scales as O(N^2) on the size of a
# room, since in a large room we have both more RRs coming in, and more servers
# to send them to.)
#
# This leads to a lot of CPU load, and we end up getting behind. The
# solution currently adopted is to differentiate between receipts and
# destinations we should immediately send to, and those we can trickle
# the receipts to.
# This leads to a lot of CPU load, and we end up getting behind. The solution
# currently adopted is as follows:
#
# The current logic is to send receipts out immediately if:
# - the room is "small", i.e. there's only N servers to send receipts
# to, and so sending out the receipts immediately doesn't cause too
# much load; or
# - the receipt is for an event that happened recently, as users
# notice if receipts are delayed when they know other users are
# currently reading the room; or
# - the receipt is being sent to the server that sent the event, so
# that users see receipts for their own receipts quickly.
# The first receipt in a given room is sent out immediately, at time T0. Any
# further receipts are, in theory, batched up for N seconds, where N is calculated
# based on the number of servers in the room to achieve a transaction frequency
# of around 50Hz. So, for example, if there were 100 servers in the room, then
# N would be 100 / 50Hz = 2 seconds.
#
# For destinations that we should delay sending the receipt to, we queue
# the receipts up to be sent in the next transaction, but don't trigger
# a new transaction to be sent. We then add the destination to the
# `DestinationWakeupQueue`, which will slowly iterate over each
# destination and trigger a new transaction to be sent.
# Then, after T+N, we flush out any receipts that have accumulated, and restart
# the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
# we stop the cycle and go back to the start.
#
# However, in practice, it is often possible to send out delayed
# receipts earlier: in particular, if we are sending a transaction to a
# given server anyway (for example, because we have a PDU or a RR in
# another room to send), then we may as well send out all of the pending
# RRs for that server. So it may be that by the time we get to waking up
# the destination, we don't actually have any RRs left to send out.
# However, in practice, it is often possible to flush out receipts earlier: in
# particular, if we are sending a transaction to a given server anyway (for
# example, because we have a PDU or a RR in another room to send), then we may
# as well send out all of the pending RRs for that server. So it may be that
# by the time we get to T+N, we don't actually have any RRs left to send out.
# Nevertheless we continue to buffer up RRs for the room in question until we
# reach the point that no RRs arrive between timer ticks.
#
# For even more background, see
# https://github.com/matrix-org/synapse/issues/4730.
# For even more background, see https://github.com/matrix-org/synapse/issues/4730.
room_id = receipt.room_id
# Local read receipts always have 1 event ID.
event_id = receipt.event_ids[0]
# Work out which remote servers should be poked and poke them.
domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
@@ -806,51 +797,49 @@ class FederationSender(AbstractFederationSender):
if not domains:
return
# We now split which domains we want to wake up immediately vs which we
# want to delay waking up.
immediate_domains: StrCollection
delay_domains: StrCollection
queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
if len(domains) < 10:
# For "small" rooms send to all domains immediately
immediate_domains = domains
delay_domains = ()
# if there is no flush yet scheduled, we will send out these receipts with
# immediate flushes, and schedule the next flush for this room.
if queues_pending_flush is not None:
logger.debug("Queuing receipt for: %r", domains)
else:
metadata = await self.store.get_metadata_for_event(
receipt.room_id, event_id
)
assert metadata is not None
logger.debug("Sending receipt to: %r", domains)
self._schedule_rr_flush_for_room(room_id, len(domains))
sender_domain = get_domain_from_id(metadata.sender)
for domain in domains:
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
if self.clock.time_msec() - metadata.received_ts < 60_000:
# We always send receipts for recent messages immediately
immediate_domains = domains
delay_domains = ()
# if there is already a RR flush pending for this room, then make sure this
# destination is registered for the flush
if queues_pending_flush is not None:
queues_pending_flush.add(queue)
else:
# Otherwise, we delay waking up all destinations except for the
# sender's domain.
immediate_domains = []
delay_domains = []
for domain in domains:
if domain == sender_domain:
immediate_domains.append(domain)
else:
delay_domains.append(domain)
queue.flush_read_receipts_for_room(room_id)
for domain in immediate_domains:
# Add to destination queue and wake the destination up
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
queue.attempt_new_transaction()
def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
# that is going to cause approximately len(domains) transactions, so now back
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
for domain in delay_domains:
# Add to destination queue...
queue = self._get_per_destination_queue(domain)
queue.queue_read_receipt(receipt)
logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
self._queues_awaiting_rr_flush_by_room[room_id] = set()
# ... and schedule the destination to be woken up.
self._destination_wakeup_queue.add_to_queue(domain)
def _flush_rrs_for_room(self, room_id: str) -> None:
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)
if not queues:
# no more RRs arrived for this room; we are done.
return
# schedule the next flush
self._schedule_rr_flush_for_room(room_id, len(queues))
for queue in queues:
queue.flush_read_receipts_for_room(room_id)
async def send_presence_to_destinations(
self, states: Iterable[UserPresenceState], destinations: Iterable[str]

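A hedged sketch of the immediate-vs-delayed receipt fan-out described in the comments above; the thresholds mirror the code shown (fewer than 10 servers, events newer than 60 seconds), but the function itself is purely illustrative:

def split_receipt_destinations(domains, sender_domain, event_age_ms):
    """Illustrative split of receipt destinations into immediate vs delayed."""
    if len(domains) < 10 or event_age_ms < 60_000:
        # Small rooms, or receipts for recent events: poke everyone now.
        return list(domains), []
    # Otherwise only the event sender's server is woken immediately; the rest
    # are trickled out via the destination wakeup queue.
    immediate, delayed = [], []
    for domain in domains:
        (immediate if domain == sender_domain else delayed).append(domain)
    return immediate, delayed

domains = [f"hs{i}.example" for i in range(12)] + ["origin.example"]
immediate, delayed = split_receipt_destinations(domains, "origin.example", 120_000)
assert immediate == ["origin.example"] and len(delayed) == 12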
View File

@@ -156,6 +156,7 @@ class PerDestinationQueue:
# Each receipt can only have a single receipt per
# (room ID, receipt type, user ID, thread ID) tuple.
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
# NB: may be a long or an int.
@@ -257,7 +258,15 @@ class PerDestinationQueue:
}
)
self.mark_new_data()
def flush_read_receipts_for_room(self, room_id: str) -> None:
# If there are any pending receipts for this room then force-flush them
# in a new transaction.
for edu in self._pending_receipt_edus:
if room_id in edu:
self._rrs_pending_flush = True
self.attempt_new_transaction()
# No use in checking remaining EDUs if the room was found.
break
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu
@@ -594,9 +603,12 @@ class PerDestinationQueue:
self._destination, last_successful_stream_ordering
)
def _get_receipt_edus(self, limit: int) -> Iterable[Edu]:
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
if not self._pending_receipt_edus:
return
if not force_flush and not self._rrs_pending_flush:
# not yet time for this lot
return
# Send at most limit EDUs for receipts.
for content in self._pending_receipt_edus[:limit]:
@@ -735,7 +747,7 @@ class _TransactionQueueManager:
)
# Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(limit=5))
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
# Next, prioritize to-device messages so that existing encryption channels
@@ -783,6 +795,13 @@ class _TransactionQueueManager:
if not self._pdus and not pending_edus:
return [], []
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if edu_limit:
pending_edus.extend(
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
)
if self._pdus:
self._last_stream_ordering = self._pdus[
-1

View File

@@ -113,7 +113,7 @@ class Authenticator:
):
raise AuthenticationError(
HTTPStatus.UNAUTHORIZED,
f"Destination mismatch in auth header, received: {destination!r}",
"Destination mismatch in auth header",
Codes.UNAUTHORIZED,
)
if (

View File

@@ -509,9 +509,6 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
event = content["event"]
invite_room_state = content.get("invite_room_state", [])
if not isinstance(invite_room_state, list):
invite_room_state = []
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API

View File

@@ -73,8 +73,6 @@ class AdminHandler:
self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME
)
self.hs = hs
async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]:
"""Get the current status of an active redaction process
@@ -124,7 +122,6 @@ class AdminHandler:
"consent_ts": user_info.consent_ts,
"user_type": user_info.user_type,
"is_guest": user_info.is_guest,
"suspended": user_info.suspended,
}
if self._msc3866_enabled:
@@ -426,10 +423,8 @@ class AdminHandler:
user_id = task.params.get("user_id")
assert user_id is not None
# puppet the user if they're ours, otherwise use admin to redact
requester = create_requester(
user_id if self.hs.is_mine_id(user_id) else admin.user.to_string(),
authenticated_entity=admin.user.to_string(),
user_id, authenticated_entity=admin.user.to_string()
)
reason = task.params.get("reason")
@@ -448,8 +443,8 @@ class AdminHandler:
["m.room.member", "m.room.message"],
)
if not event_ids:
# nothing to redact in this room
continue
# there's nothing to redact
return TaskStatus.COMPLETE, result, None
events = await self._store.get_events_as_list(event_ids)
for event in events:
@@ -473,7 +468,7 @@ class AdminHandler:
"type": EventTypes.Redaction,
"content": {"reason": reason} if reason else {},
"room_id": room,
"sender": requester.user.to_string(),
"sender": user_id,
}
if room_version.updated_redaction_rules:
event_dict["content"]["redacts"] = event.event_id

View File

@@ -896,10 +896,10 @@ class ApplicationServicesHandler:
results = await make_deferred_yieldable(
defer.DeferredList(
[
run_in_background( # type: ignore[call-overload]
run_in_background(
self.appservice_api.claim_client_keys,
# We know this must be an app service.
self.store.get_app_service_by_id(service_id),
self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
service_query,
)
for service_id, service_query in query_by_appservice.items()
@@ -952,10 +952,10 @@ class ApplicationServicesHandler:
results = await make_deferred_yieldable(
defer.DeferredList(
[
run_in_background( # type: ignore[call-overload]
run_in_background(
self.appservice_api.query_keys,
# We know this must be an app service.
self.store.get_app_service_by_id(service_id),
self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
service_query,
)
for service_id, service_query in query_by_appservice.items()

View File

@@ -1,17 +1,3 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2024 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import logging
from typing import TYPE_CHECKING, List, Optional, Set, Tuple

View File

@@ -48,7 +48,6 @@ from synapse.metrics.background_process_metrics import (
wrap_as_background_process,
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
DeviceListUpdates,
@@ -223,6 +222,7 @@ class DeviceWorkerHandler:
return changed
@trace
@measure_func("device.get_user_ids_changed")
@cancellable
async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken
@@ -290,11 +290,9 @@ class DeviceWorkerHandler:
memberships_to_fetch.add(delta.prev_event_id)
# Fetch all the memberships for the membership events
event_id_to_memberships: Mapping[str, Optional[EventIdMembership]] = {}
if memberships_to_fetch:
event_id_to_memberships = await self.store.get_membership_from_event_ids(
memberships_to_fetch
)
event_id_to_memberships = await self.store.get_membership_from_event_ids(
memberships_to_fetch
)
joined_invited_knocked = (
Membership.JOIN,
@@ -351,6 +349,7 @@ class DeviceWorkerHandler:
return device_list_updates
@measure_func("_generate_sync_entry_for_device_list")
async def generate_sync_entry_for_device_list(
self,
user_id: str,
@@ -729,40 +728,6 @@ class DeviceHandler(DeviceWorkerHandler):
await self.notify_device_update(user_id, device_ids)
async def upsert_device(
self, user_id: str, device_id: str, display_name: Optional[str] = None
) -> bool:
"""Create or update a device
Args:
user_id: The user to update devices of.
device_id: The device to update.
display_name: The new display name for this device.
Returns:
True if the device was created, False if it was updated.
"""
# Reject a new displayname which is too long.
self._check_device_name_length(display_name)
created = await self.store.store_device(
user_id,
device_id,
initial_device_display_name=display_name,
)
if not created:
await self.store.update_device(
user_id,
device_id,
new_display_name=display_name,
)
await self.notify_device_update(user_id, [device_id])
return created
async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
"""Update the given device

Some files were not shown because too many files have changed in this diff.