Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)

Compare commits: `quenting/l` ... `anoa/cache` (73 commits)
| SHA1 |
|---|
| e7ca69d6b2 |
| 90387cc45f |
| c6534e66b1 |
| a096967507 |
| 3ec9f164f2 |
| b4ff74c2f8 |
| 21c2c0403c |
| 1ea7cf0c57 |
| 6ffd8b9119 |
| 325632fdfc |
| ce8b0747ba |
| 84d64251dc |
| 2bed3fb566 |
| 2c60b67a95 |
| 6358afff8d |
| f7b547e2d8 |
| 8f7bd946de |
| 4f80fa4b0a |
| b2592667a4 |
| 769d30a247 |
| 7ecfe8b1a8 |
| e1036ffa48 |
| 8c98cf7e55 |
| ec64c3e88d |
| ada3a3b2b3 |
| 9cc4001778 |
| c68c5dd07b |
| 92bdf77c3f |
| e43bf10187 |
| 6146dbad3e |
| ca655e4020 |
| 7951d41b4e |
| e235099ab9 |
| 3e865e403b |
| 35e7e659f6 |
| 39e4f27347 |
| 6fe8137a4a |
| fcffd2e897 |
| d48e69ad4c |
| 74fdbc7b75 |
| 4d55f2f301 |
| dfccde9f60 |
| 4b43e6fe02 |
| b2997a8f20 |
| bff4a11b3f |
| 09a489e198 |
| 537e14169e |
| 68068de3a4 |
| 356cc4a0a1 |
| 27fc3389f3 |
| df2cfb3932 |
| c339021ce8 |
| 499f947c67 |
| e76a9af4d7 |
| eec1ca6e93 |
| 56b5759c0f |
| 767177ca5a |
| 5b8e6e7911 |
| 6a6be6fbe2 |
| 21c7841228 |
| 5b55e3f15d |
| 0e2b92bcbc |
| 481987eb83 |
| 5fd30c7ea7 |
| d527c794fb |
| 19fe3f001e |
| f8a44638eb |
| 7ec5e60671 |
| 48184eefa3 |
| 205d9e4fc4 |
| 40edb10a98 |
| 3d7e39b2ea |
| c51da9bac0 |
.github/workflows/docker.yml (vendored; 12 changed lines)

```diff
@@ -31,7 +31,7 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Extract version from pyproject.toml
         # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -41,13 +41,13 @@ jobs:
           echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV

       - name: Log in to DockerHub
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Log in to GHCR
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -102,14 +102,14 @@ jobs:
           merge-multiple: true

       - name: Log in to DockerHub
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         if: ${{ startsWith(matrix.repository, 'docker.io') }}
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Log in to GHCR
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         if: ${{ startsWith(matrix.repository, 'ghcr.io') }}
         with:
           registry: ghcr.io
@@ -120,7 +120,7 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Install Cosign
-        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
+        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0

       - name: Calculate docker image tag
         uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
```
.github/workflows/docs-pr.yaml (vendored; 6 changed lines)

```diff
@@ -13,7 +13,7 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -24,7 +24,7 @@ jobs:
           mdbook-version: '0.4.17'

       - name: Setup python
-        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"

@@ -50,7 +50,7 @@ jobs:
     name: Check links in documentation
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Setup mdbook
         uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
```
.github/workflows/docs.yaml (vendored; 4 changed lines)

```diff
@@ -50,7 +50,7 @@ jobs:
     needs:
       - pre
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -64,7 +64,7 @@ jobs:
         run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js

       - name: Setup python
-        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
```
.github/workflows/fix_lint.yaml (vendored; 4 changed lines)

```diff
@@ -18,10 +18,10 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
           components: clippy, rustfmt
```
.github/workflows/latest_deps.yml (vendored; 20 changed lines)

```diff
@@ -42,9 +42,9 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -77,10 +77,10 @@ jobs:
         postgres-version: "14"

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -93,7 +93,7 @@ jobs:
           -e POSTGRES_PASSWORD=postgres \
           -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
           postgres:${{ matrix.postgres-version }}
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: pip install .[all,test]
@@ -152,10 +152,10 @@ jobs:
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -202,14 +202,14 @@ jobs:

     steps:
       - name: Check out synapse codebase
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse

       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh

-      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -234,7 +234,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/poetry_lockfile.yaml (vendored; 4 changed lines)

```diff
@@ -16,8 +16,8 @@ jobs:
     name: "Check locked dependencies have sdists"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: '3.x'
       - run: pip install tomli
```
.github/workflows/push_complement_image.yml (vendored; 8 changed lines)

```diff
@@ -33,22 +33,22 @@ jobs:
       packages: write
     steps:
       - name: Checkout specific branch (debug build)
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         if: github.event_name == 'workflow_dispatch'
         with:
           ref: ${{ inputs.branch }}
       - name: Checkout clean copy of develop (scheduled build)
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         if: github.event_name == 'schedule'
         with:
           ref: develop
       - name: Checkout clean copy of master (on-push)
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         if: github.event_name == 'push'
         with:
           ref: master
       - name: Login to registry
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
```
.github/workflows/release-artifacts.yml (vendored; 16 changed lines)

```diff
@@ -27,8 +27,8 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - id: set-distros
@@ -55,7 +55,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: src
@@ -74,7 +74,7 @@ jobs:
             ${{ runner.os }}-buildx-

       - name: Set up python
-        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
@@ -132,9 +132,9 @@ jobs:
             os: "ubuntu-24.04-arm"

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           # setup-python@v4 doesn't impose a default python version. Need to use 3.x
           # here, because `python` on osx points to Python 2.7.
@@ -165,8 +165,8 @@ jobs:
     if: ${{ !startsWith(github.ref, 'refs/pull/') }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.10"
```
.github/workflows/schema.yaml (vendored; 8 changed lines)

```diff
@@ -14,8 +14,8 @@ jobs:
     name: Ensure Synapse config schema is valid
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - name: Install check-jsonschema
@@ -40,8 +40,8 @@ jobs:
     name: Ensure generated documentation is up-to-date
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - name: Install PyYAML
```
.github/workflows/tests.yml (vendored; 86 changed lines)

```diff
@@ -86,9 +86,9 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -106,8 +106,8 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
@@ -116,8 +116,8 @@ jobs:
   check-lockfile:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: .ci/scripts/check_lockfile.py
@@ -129,7 +129,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,10 +151,10 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -187,7 +187,7 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh

@@ -195,11 +195,11 @@ jobs:
     if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: "pip install 'towncrier>=18.6.0rc1'"
@@ -213,11 +213,11 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -233,10 +233,10 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           components: clippy
           toolchain: ${{ env.RUST_VERSION }}
@@ -252,10 +252,10 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: nightly-2025-04-23
           components: clippy
@@ -270,10 +270,10 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -306,10 +306,10 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           # We use nightly so that we can use some unstable options that we use in
           # `.rustfmt.toml`.
@@ -326,8 +326,8 @@ jobs:
     needs: changes
     if: ${{ needs.changes.outputs.linting_readme == 'true' }}
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - run: "pip install rstcheck"
@@ -376,8 +376,8 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: "3.x"
       - id: get-matrix
@@ -397,7 +397,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
@@ -412,7 +412,7 @@ jobs:
             postgres:${{ matrix.job.postgres-version }}

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -453,10 +453,10 @@ jobs:
       - changes
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -468,7 +468,7 @@ jobs:
           sudo apt-get -qq install build-essential libffi-dev python3-dev \
             libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev

-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: '3.9'
@@ -518,7 +518,7 @@ jobs:
         extras: ["all"]

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -568,12 +568,12 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -615,7 +615,7 @@ jobs:
           --health-retries 5

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -659,7 +659,7 @@ jobs:
           --health-retries 5

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - name: Add PostgreSQL apt repository
         # We need a version of pg_dump that can handle the version of
         # PostgreSQL being tested against. The Ubuntu package repository lags
@@ -714,12 +714,12 @@ jobs:

     steps:
       - name: Checkout synapse codebase
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -727,7 +727,7 @@ jobs:
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh

-      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -750,10 +750,10 @@ jobs:
       - changes

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -770,10 +770,10 @@ jobs:
       - changes

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: nightly-2022-12-01
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
```
.github/workflows/triage_labelled.yml (vendored; 2 changed lines)

```diff
@@ -11,7 +11,7 @@ jobs:
     if: >
       contains(github.event.issue.labels.*.name, 'X-Needs-Info')
     steps:
-      - uses: actions/add-to-project@c0c5949b017d0d4a39f7ba888255881bdac2a823 # v1.0.2
+      - uses: actions/add-to-project@4515659e2b458b27365e167605ac44f219494b66 # v1.0.2
         id: add_project
         with:
           project-url: "https://github.com/orgs/matrix-org/projects/67"
```
.github/workflows/twisted_trunk.yml (vendored; 18 changed lines)

```diff
@@ -43,10 +43,10 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -70,11 +70,11 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - run: sudo apt-get -qq install xmlsec1

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -117,10 +117,10 @@ jobs:
       - ${{ github.workspace }}:/src

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
         with:
           toolchain: ${{ env.RUST_VERSION }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -175,14 +175,14 @@ jobs:

     steps:
       - name: Run actions/checkout@v4 for synapse
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           path: synapse

       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh

-      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -217,7 +217,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
CHANGES.md (92 changed lines)

```diff
@@ -1,3 +1,95 @@
+# Synapse 1.138.0 (2025-09-09)
+
+No significant changes since 1.138.0rc1.
+
+
+
+
+# Synapse 1.138.0rc1 (2025-09-02)
+
+### Features
+
+- Support for the stable endpoint and scopes of [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) & co. ([\#18549](https://github.com/element-hq/synapse/issues/18549))
+
+### Bugfixes
+
+- Improve database performance of [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) - Redact on Kick/Ban. ([\#18851](https://github.com/element-hq/synapse/issues/18851))
+- Do not throw an error when fetching a rejected delayed state event on startup. ([\#18858](https://github.com/element-hq/synapse/issues/18858))
+
+### Improved Documentation
+
+- Fix worker documentation incorrectly indicating all room Admin API requests were capable of being handled by workers. ([\#18853](https://github.com/element-hq/synapse/issues/18853))
+
+### Internal Changes
+
+- Instrument `_ByteProducer` with tracing to measure potential dead time while writing bytes to the request. ([\#18804](https://github.com/element-hq/synapse/issues/18804))
+- Switch to OpenTracing's `ContextVarsScopeManager` instead of our own custom `LogContextScopeManager`. ([\#18849](https://github.com/element-hq/synapse/issues/18849))
+- Trace how much work is being done while "recursively fetching redactions". ([\#18854](https://github.com/element-hq/synapse/issues/18854))
+- Link [upstream Twisted bug](https://github.com/twisted/twisted/issues/12498) tracking the problem that explains why we have to use a `Producer` to write bytes to the request. ([\#18855](https://github.com/element-hq/synapse/issues/18855))
+- Introduce `EventPersistencePair` type. ([\#18857](https://github.com/element-hq/synapse/issues/18857))
+
+
+
+### Updates to locked dependencies
+
+* Bump actions/add-to-project from c0c5949b017d0d4a39f7ba888255881bdac2a823 to 4515659e2b458b27365e167605ac44f219494b66. ([\#18863](https://github.com/element-hq/synapse/issues/18863))
+* Bump actions/checkout from 4.3.0 to 5.0.0. ([\#18834](https://github.com/element-hq/synapse/issues/18834))
+* Bump anyhow from 1.0.98 to 1.0.99. ([\#18841](https://github.com/element-hq/synapse/issues/18841))
+* Bump docker/login-action from 3.4.0 to 3.5.0. ([\#18835](https://github.com/element-hq/synapse/issues/18835))
+* Bump dtolnay/rust-toolchain from b3b07ba8b418998c39fb20f53e8b695cdcc8de1b to e97e2d8cc328f1b50210efc529dca0028893a2d9. ([\#18862](https://github.com/element-hq/synapse/issues/18862))
+* Bump phonenumbers from 9.0.11 to 9.0.12. ([\#18837](https://github.com/element-hq/synapse/issues/18837))
+* Bump regex from 1.11.1 to 1.11.2. ([\#18864](https://github.com/element-hq/synapse/issues/18864))
+* Bump reqwest from 0.12.22 to 0.12.23. ([\#18842](https://github.com/element-hq/synapse/issues/18842))
+* Bump ruff from 0.12.7 to 0.12.10. ([\#18865](https://github.com/element-hq/synapse/issues/18865))
+* Bump serde_json from 1.0.142 to 1.0.143. ([\#18866](https://github.com/element-hq/synapse/issues/18866))
+* Bump types-bleach from 6.2.0.20250514 to 6.2.0.20250809. ([\#18838](https://github.com/element-hq/synapse/issues/18838))
+* Bump types-jsonschema from 4.25.0.20250720 to 4.25.1.20250822. ([\#18867](https://github.com/element-hq/synapse/issues/18867))
+* Bump types-psycopg2 from 2.9.21.20250718 to 2.9.21.20250809. ([\#18836](https://github.com/element-hq/synapse/issues/18836))
+
 # Synapse 1.137.0 (2025-08-26)
 
 No significant changes since 1.137.0rc1.
 
 
 
 
 # Synapse 1.137.0rc1 (2025-08-19)
 
 ### Bugfixes
 
 - Fix a bug which could corrupt auth chains making it impossible to perform state resolution. ([\#18746](https://github.com/element-hq/synapse/issues/18746))
 - Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`. ([\#18780](https://github.com/element-hq/synapse/issues/18780))
 - Allow enabling [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) when the stable Matrix Authentication Service integration is enabled. ([\#18832](https://github.com/element-hq/synapse/issues/18832))
 
 ### Improved Documentation
 
 - Include IPv6 networks in `denied-peer-ips` of coturn setup. Contributed by @litetex. ([\#18781](https://github.com/element-hq/synapse/issues/18781))
 
 ### Internal Changes
 
 - Update tests to ensure all database tables are emptied when purging a room. ([\#18794](https://github.com/element-hq/synapse/issues/18794))
 - Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger. ([\#18815](https://github.com/element-hq/synapse/issues/18815))
 - Tag Sliding Sync traces when we `wait_for_events`. ([\#18816](https://github.com/element-hq/synapse/issues/18816))
 - Fix `portdb` CI by hardcoding the new `pg_dump` restrict key that was added due to [CVE-2025-8714](https://nvd.nist.gov/vuln/detail/cve-2025-8714). ([\#18824](https://github.com/element-hq/synapse/issues/18824))
 
 
 
 ### Updates to locked dependencies
 
 * Bump actions/add-to-project from 5b1a254a3546aef88e0a7724a77a623fa2e47c36 to 0c37450c4be3b6a7582b2fb013c9ebfd9c8e9300. ([\#18557](https://github.com/element-hq/synapse/issues/18557))
 * Bump actions/cache from 4.2.3 to 4.2.4. ([\#18799](https://github.com/element-hq/synapse/issues/18799))
 * Bump actions/checkout from 4.2.2 to 4.3.0. ([\#18800](https://github.com/element-hq/synapse/issues/18800))
 * Bump actions/download-artifact from 4.3.0 to 5.0.0. ([\#18801](https://github.com/element-hq/synapse/issues/18801))
 * Bump docker/metadata-action from 5.7.0 to 5.8.0. ([\#18773](https://github.com/element-hq/synapse/issues/18773))
 * Bump mypy from 1.16.1 to 1.17.1. ([\#18775](https://github.com/element-hq/synapse/issues/18775))
 * Bump phonenumbers from 9.0.10 to 9.0.11. ([\#18797](https://github.com/element-hq/synapse/issues/18797))
 * Bump pygithub from 2.6.1 to 2.7.0. ([\#18779](https://github.com/element-hq/synapse/issues/18779))
 * Bump serde_json from 1.0.141 to 1.0.142. ([\#18776](https://github.com/element-hq/synapse/issues/18776))
 * Bump slab from 0.4.10 to 0.4.11. ([\#18809](https://github.com/element-hq/synapse/issues/18809))
 * Bump tokio from 1.47.0 to 1.47.1. ([\#18774](https://github.com/element-hq/synapse/issues/18774))
 * Bump types-pyyaml from 6.0.12.20250516 to 6.0.12.20250809. ([\#18798](https://github.com/element-hq/synapse/issues/18798))
 * Bump types-setuptools from 80.9.0.20250529 to 80.9.0.20250809. ([\#18796](https://github.com/element-hq/synapse/issues/18796))
 
 # Synapse 1.136.0 (2025-08-12)
 
 Note: This release includes the security fixes from `1.135.2` and `1.136.0rc2`, detailed below.
```
Cargo.lock (generated; 39 changed lines)

```diff
@@ -28,9 +28,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.98"
+version = "1.0.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"

 [[package]]
 name = "arc-swap"
@@ -753,9 +753,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"

 [[package]]
 name = "log"
-version = "0.4.27"
+version = "0.4.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
+checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"

 [[package]]
 name = "lru-slab"
@@ -1062,9 +1062,9 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.11.1"
+version = "1.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1091,9 +1091,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

 [[package]]
 name = "reqwest"
-version = "0.12.22"
+version = "0.12.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
+checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
 dependencies = [
  "base64",
  "bytes",
@@ -1250,18 +1250,28 @@ dependencies = [

 [[package]]
 name = "serde"
-version = "1.0.219"
+version = "1.0.224"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
+checksum = "6aaeb1e94f53b16384af593c71e20b095e958dab1d26939c1b70645c5cfbcc0b"
 dependencies = [
+ "serde_core",
  "serde_derive",
 ]

+[[package]]
+name = "serde_core"
+version = "1.0.224"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32f39390fa6346e24defbcdd3d9544ba8a19985d0af74df8501fbfe9a64341ab"
+dependencies = [
+ "serde_derive",
+]
+
 [[package]]
 name = "serde_derive"
-version = "1.0.219"
+version = "1.0.224"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
+checksum = "87ff78ab5e8561c9a675bfc1785cb07ae721f0ee53329a595cefd8c04c2ac4e0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1270,14 +1280,15 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.142"
+version = "1.0.145"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
 dependencies = [
  "itoa",
  "memchr",
  "ryu",
  "serde",
+ "serde_core",
 ]

 [[package]]
```
changelog.d — added and removed changelog fragments (one line each):

- Added `changelog.d/18583.removal`: Remove obsolete and experimental `/sync/e2ee` endpoint.
- Added `changelog.d/18641.bugfix`: Ensure all PDUs sent via `/send` pass canonical JSON checks.
- Added `changelog.d/18695.feature`: Add experimental support for [MSC4308: Thread Subscriptions extension to Sliding Sync](https://github.com/matrix-org/matrix-spec-proposals/pull/4308) when [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-spec-proposals/pull/4306) and [MSC4186: Simplified Sliding Sync](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) are enabled.
- Removed: Fix a bug which could corrupt auth chains making it impossible to perform state resolution.
- Removed: Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`.
- Removed: Include IPv6 networks in denied-peer-ips of coturn setup. Contributed by @litetex.
- Added `changelog.d/18791.misc`: Fix `LaterGauge` metrics to collect from all servers.
- Removed: Update tests to ensure all database tables are emptied when purging a room.
- Removed: Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger.
- Removed: Tag Sliding Sync traces when we `wait_for_events`.
- Added `changelog.d/18819.misc`: Configure Synapse to run MSC4306: Thread Subscriptions Complement tests.
- Added `changelog.d/18823.bugfix`: Fix bug where we did not send invite revocations over federation.
- Removed: Fix portdb CI by hardcoding the new pg_dump restrict key that was added due to CVE-2025-8714.
- Removed: Allow enabling MSC4108 when the stable Matrix Authentication Service integration is enabled.
- Added `changelog.d/18846.feature`: Update push rules for experimental [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-doc/issues/4306) to follow newer draft.
- Added `changelog.d/18848.feature`: Add `get_media_upload_limits_for_user` and `on_media_upload_limit_exceeded` module API callbacks for media repository.
- Added `changelog.d/18856.doc`: Clarify Python dependency constraints in our deprecation policy.
- Added `changelog.d/18870.misc`: Remove `sentinel` logcontext usage where we log in `setup`, `start` and exit.
- Added `changelog.d/18874.misc`: Use the `Enum`'s value for the dictionary key when responding to an admin request for experimental features.
- Added `changelog.d/18875.bugfix`: Fix prefixed support for MSC4133.
- Added `changelog.d/18878.docker`: Suppress "Applying schema" log noise bulk when `SYNAPSE_LOG_TESTING` is set.
- Added `changelog.d/18886.misc`: Start background tasks after we fork the process (daemonize).
- Added `changelog.d/18899.feature`: Add an in-memory cache to `_get_e2e_cross_signing_signatures_for_devices` to reduce DB load.
- Added `changelog.d/18900.misc`: Better explain how we manage the logcontext in `run_in_background(...)` and `run_as_background_process(...)`.
- Added `changelog.d/18906.misc`: Better explain how we manage the logcontext in `run_in_background(...)` and `run_as_background_process(...)`.
- Added `changelog.d/18910.misc`: Replace usages of the deprecated `pkg_resources` interface in preparation of setuptools dropping it soon.
debian/changelog (vendored; 24 changed lines)

```diff
@@ -1,3 +1,27 @@
+matrix-synapse-py3 (1.138.0) stable; urgency=medium
+
+  * New Synapse release 1.138.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Sep 2025 11:21:25 +0100
+
+matrix-synapse-py3 (1.138.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.138.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Sep 2025 12:16:14 +0000
+
+matrix-synapse-py3 (1.137.0) stable; urgency=medium
+
+  * New Synapse release 1.137.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Aug 2025 10:23:41 +0100
+
+matrix-synapse-py3 (1.137.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.137.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Aug 2025 10:55:22 +0100
+
 matrix-synapse-py3 (1.136.0) stable; urgency=medium
 
   * New Synapse release 1.136.0.
```
```diff
@@ -133,6 +133,8 @@ experimental_features:
   msc3984_appservice_key_query: true
   # Invite filtering
   msc4155_enabled: true
+  # Thread Subscriptions
+  msc4306_enabled: true
 
 server_notices:
   system_mxid_localpart: _server
```
```diff
@@ -77,6 +77,13 @@ loggers:
 #}
 synapse.visibility.filtered_event_debug:
     level: DEBUG
+
+{#
+  If Synapse is under test, we don't care about seeing the "Applying schema" log
+  lines at the INFO level every time we run the tests (it's 100 lines of bulk)
+#}
+synapse.storage.prepare_database:
+    level: WARN
 {% endif %}
 
 root:
```
```diff
@@ -1,13 +1,11 @@
-Deprecation Policy for Platform Dependencies
-============================================
+# Deprecation Policy
 
-Synapse has a number of platform dependencies, including Python, Rust,
-PostgreSQL and SQLite. This document outlines the policy towards which versions
-we support, and when we drop support for versions in the future.
+Synapse has a number of **platform dependencies** (Python, Rust, PostgreSQL, and SQLite)
+and **application dependencies** (Python and Rust packages). This document outlines the
+policy towards which versions we support, and when we drop support for versions in the
+future.
 
 
-Policy
-------
+## Platform Dependencies
 
 Synapse follows the upstream support life cycles for Python and PostgreSQL,
 i.e. when a version reaches End of Life Synapse will withdraw support for that
@@ -26,8 +24,8 @@ The oldest supported version of SQLite is the version
 [provided](https://packages.debian.org/bullseye/libsqlite3-0) by
 [Debian oldstable](https://wiki.debian.org/DebianOldStable).
 
-Context
--------
+
+### Context
 
 It is important for system admins to have a clear understanding of the platform
 requirements of Synapse and its deprecation policies so that they can
@@ -50,4 +48,42 @@ the ecosystem.
 On a similar note, SQLite does not generally have a concept of "supported
 release"; bugfixes are published for the latest minor release only. We chose to
 track Debian's oldstable as this is relatively conservative, predictably updated
-and is consistent with the `.deb` packages released by Matrix.org.
+and is consistent with the `.deb` packages released by Matrix.org.
+
+
+## Application dependencies
+
+For application-level Python dependencies, we often specify loose version constraints
+(ex. `>=X.Y.Z`) to be forwards compatible with any new versions. Upper bounds (`<A.B.C`)
+are only added when necessary to prevent known incompatibilities.
+
+When selecting a minimum version, while we are mindful of the impact on downstream
+package maintainers, our primary focus is on the maintainability and progress of Synapse
+itself.
+
+For developers, a Python dependency version can be considered a "no-brainer" upgrade once it is
+available in both the latest [Debian Stable](https://packages.debian.org/stable/) and
+[Ubuntu LTS](https://launchpad.net/ubuntu) repositories. No need to burden yourself with
+extra scrutiny or consideration at this point.
+
+We aggressively update Rust dependencies. Since these are statically linked and managed
+entirely by `cargo` during build, they *can* pose no ongoing maintenance burden on others.
+This allows us to freely upgrade to leverage the latest ecosystem advancements assuming
+they don't have their own system-level dependencies.
+
+
+### Context
+
+Because Python dependencies can easily be managed in a virtual environment, we are less
+concerned about the criteria for selecting minimum versions. The only thing of concern
+is making sure we're not making it unnecessarily difficult for downstream package
+maintainers. Generally, this just means avoiding the bleeding edge for a few months.
+
+The situation for Rust dependencies is fundamentally different. For packagers, the
+concerns around Python dependency versions do not apply. The `cargo` tool handles
+downloading and building all libraries to satisfy dependencies, and these libraries are
+statically linked into the final binary. This means that from a packager's perspective,
+the Rust dependency versions are an internal build detail, not a runtime dependency to
+be managed on the target system. Consequently, we have even greater flexibility to
+upgrade Rust dependencies as needed for the project. Some distros (e.g. Fedora) do
+package Rust libraries, but this appears to be the outlier rather than the norm.
```
````diff
@@ -59,6 +59,28 @@ def do_request_handling():
     logger.debug("phew")
 ```
 
+### The `sentinel` context
+
+The default logcontext is `synapse.logging.context.SENTINEL_CONTEXT`, which is an empty
+sentinel value to represent the root logcontext. This is what is used when there is no
+other logcontext set. The phrase "clear/reset the logcontext" means to set the current
+logcontext to the `sentinel` logcontext.
+
+No CPU/database usage metrics are recorded against the `sentinel` logcontext.
+
+Ideally, nothing from the Synapse homeserver would be logged against the `sentinel`
+logcontext as we want to know which server the logs came from. In practice, this is not
+always the case yet especially outside of request handling.
+
+Global things outside of Synapse (e.g. Twisted reactor code) should run in the
+`sentinel` logcontext. It's only when it calls into application code that a logcontext
+gets activated. This means the reactor should be started in the `sentinel` logcontext,
+and any time an awaitable yields control back to the reactor, it should reset the
+logcontext to be the `sentinel` logcontext. This is important to avoid leaking the
+current logcontext to the reactor (which would then get picked up and associated with
+the next thing the reactor does).
````
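As a rough illustration of that rule, the helpers in `synapse.logging.context` can be used as below. This is a minimal sketch, assuming `LoggingContext`, `PreserveLoggingContext`, `current_context` and `SENTINEL_CONTEXT` keep their current meaning:

```python
from synapse.logging.context import (
    SENTINEL_CONTEXT,
    LoggingContext,
    PreserveLoggingContext,
    current_context,
)


def handle_request() -> None:
    with LoggingContext(name="request-1"):
        # Work here is logged and metered against "request-1".
        with PreserveLoggingContext():
            # With no argument, PreserveLoggingContext resets the current
            # logcontext to the sentinel, so anything that yields to the
            # reactor from here cannot be mis-attributed to "request-1".
            assert current_context() is SENTINEL_CONTEXT
        # On exiting the inner block, "request-1" is restored.
```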
```diff
 
 ## Using logcontexts with awaitables
 
 Awaitables break the linear flow of code so that there is no longer a single entry point
```
````diff
@@ -64,3 +64,68 @@ If multiple modules implement this callback, they will be considered in order. I
 returns `True`, Synapse falls through to the next one. The value of the first callback that
 returns `False` will be used. If this happens, Synapse will not call any of the subsequent
 implementations of this callback.
+
+### `get_media_upload_limits_for_user`
+
+_First introduced in Synapse v1.139.0_
+
+```python
+async def get_media_upload_limits_for_user(user_id: str, size: int) -> Optional[List[synapse.module_api.MediaUploadLimit]]
+```
+
+**<span style="color:red">
+Caution: This callback is currently experimental. The method signature or behaviour
+may change without notice.
+</span>**
+
+Called when processing a request to store content in the media repository. This can be used to dynamically override
+the [media upload limits configuration](../usage/configuration/config_documentation.html#media_upload_limits).
+
+The arguments passed to this callback are:
+
+* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
+
+If the callback returns a list then it will be used as the limits instead of those in the configuration (if any).
+
+If an empty list is returned then no limits are applied (**warning:** users will be able
+to upload as much data as they desire).
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+If there are no registered modules, or if all modules return `None`, then
+the default
+[media upload limits configuration](../usage/configuration/config_documentation.html#media_upload_limits)
+will be used.
````
|
||||
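
For illustration, a module might register this callback roughly as follows. This is a
sketch only: the class name, the trusted-user policy, and the use of
`register_media_repository_callbacks` are assumptions based on the module API
conventions, not code from this changeset:

```python
from typing import List, Optional

from synapse.module_api import MediaUploadLimit, ModuleApi


class CustomUploadLimits:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_media_repository_callbacks(
            get_media_upload_limits_for_user=self.get_media_upload_limits_for_user,
        )

    async def get_media_upload_limits_for_user(
        self, user_id: str, size: int
    ) -> Optional[List[MediaUploadLimit]]:
        # Hypothetical policy: exempt one trusted user from all limits and
        # defer to the configured limits for everyone else.
        if user_id == "@trusted:example.com":
            return []  # an empty list disables limits entirely (use with care!)
        return None  # fall through to the next module / the configuration
```

Returning a list of `MediaUploadLimit(max_bytes=..., time_period_ms=...)` objects would
instead replace the configured limits for that user.
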
### `on_media_upload_limit_exceeded`

_First introduced in Synapse v1.139.0_

```python
async def on_media_upload_limit_exceeded(user_id: str, limit: synapse.module_api.MediaUploadLimit, sent_bytes: int, attempted_bytes: int) -> None
```

**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**

Called when a user attempts to upload media that would exceed a
[configured media upload limit](../usage/configuration/config_documentation.html#media_upload_limits).

This callback will only be called on workers which handle
[POST /_matrix/media/v3/upload](https://spec.matrix.org/v1.15/client-server-api/#post_matrixmediav3upload)
requests.

This could be used to inform the user, through some external method, that they have
reached a media upload limit.

The arguments passed to this callback are:

* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
* `limit`: The `synapse.module_api.MediaUploadLimit` representing the limit that was reached.
* `sent_bytes`: The number of bytes already sent during the period of the limit.
* `attempted_bytes`: The number of bytes that the user attempted to send.
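
A minimal sketch of a module reacting to this callback (the class, the logger-based
reaction, and the registration call are assumptions; a real module might page an
administrator or message the user instead):

```python
import logging

from synapse.module_api import MediaUploadLimit, ModuleApi

logger = logging.getLogger(__name__)


class UploadLimitNotifier:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_media_repository_callbacks(
            on_media_upload_limit_exceeded=self.on_media_upload_limit_exceeded,
        )

    async def on_media_upload_limit_exceeded(
        self,
        user_id: str,
        limit: MediaUploadLimit,
        sent_bytes: int,
        attempted_bytes: int,
    ) -> None:
        # Log the rejection; the attempted upload would have pushed the user
        # past `limit.max_bytes` within `limit.time_period_ms`.
        logger.warning(
            "%s exceeded upload limit (%d bytes per %d ms): sent %d, attempted %d",
            user_id,
            limit.max_bytes,
            limit.time_period_ms,
            sent_bytes,
            attempted_bytes,
        )
```
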
@@ -35,7 +35,7 @@ handlers:
 loggers:
     synapse:
         level: INFO
-        handlers: [remote]
+        handlers: [file]
     synapse.storage.SQL:
         level: WARNING
 ```

@@ -2168,9 +2168,12 @@ max_upload_size: 60M
### `media_upload_limits`

*(array)* A list of media upload limits defining how much data a given user can upload in a given time period.
These limits are applied in addition to the `max_upload_size` limit above (which applies to individual uploads).

An empty list means no limits are applied.

These settings can be overridden using the `get_media_upload_limits_for_user` module API [callback](../../modules/media_repository_callbacks.md#get_media_upload_limits_for_user).

Defaults to `[]`.

Example configuration:

@@ -252,7 +252,7 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
     ^/_matrix/client/(r0|v3|unstable)/capabilities$
     ^/_matrix/client/(r0|v3|unstable)/notifications$
-    ^/_synapse/admin/v1/rooms/
+    ^/_synapse/admin/v1/rooms/[^/]+$

     # Encryption requests
     ^/_matrix/client/(r0|v3|unstable)/keys/query$

poetry.lock (generated file; 103 changed lines)
@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
 
 [[package]]
 name = "authlib"
-version = "1.6.1"
+version = "1.6.3"
 description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
 optional = true
 python-versions = ">=3.9"
 groups = ["main"]
 markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
 files = [
-    {file = "authlib-1.6.1-py2.py3-none-any.whl", hash = "sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e"},
-    {file = "authlib-1.6.1.tar.gz", hash = "sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd"},
+    {file = "authlib-1.6.3-py2.py3-none-any.whl", hash = "sha256:7ea0f082edd95a03b7b72edac65ec7f8f68d703017d7e37573aee4fc603f2a48"},
+    {file = "authlib-1.6.3.tar.gz", hash = "sha256:9f7a982cc395de719e4c2215c5707e7ea690ecf84f1ab126f28c053f4219e610"},
 ]
 
 [package.dependencies]
@@ -919,14 +919,14 @@ i18n = ["Babel (>=2.7)"]
 
 [[package]]
 name = "jsonschema"
-version = "4.25.0"
+version = "4.25.1"
 description = "An implementation of JSON Schema validation for Python"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "jsonschema-4.25.0-py3-none-any.whl", hash = "sha256:24c2e8da302de79c8b9382fee3e76b355e44d2a4364bb207159ce10b517bd716"},
-    {file = "jsonschema-4.25.0.tar.gz", hash = "sha256:e63acf5c11762c0e6672ffb61482bdf57f0876684d8d249c0fe2d730d48bc55f"},
+    {file = "jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63"},
+    {file = "jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85"},
 ]
 
 [package.dependencies]
@@ -1531,14 +1531,14 @@ files = [
 
 [[package]]
 name = "phonenumbers"
-version = "9.0.11"
+version = "9.0.13"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 optional = false
 python-versions = "*"
 groups = ["main"]
 files = [
-    {file = "phonenumbers-9.0.11-py2.py3-none-any.whl", hash = "sha256:a8ebb2136f1f14dfdbadb98be01cb71b96f880dea011eb5e0921967fe3a23abf"},
-    {file = "phonenumbers-9.0.11.tar.gz", hash = "sha256:6573858dcf0a7a2753a071375e154d9fc11791546c699b575af95d2ba7d84a1d"},
+    {file = "phonenumbers-9.0.13-py2.py3-none-any.whl", hash = "sha256:b97661e177773e7509c6d503e0f537cd0af22aa3746231654590876eb9430915"},
+    {file = "phonenumbers-9.0.13.tar.gz", hash = "sha256:eca06e01382412c45316868f86a44bb217c02f9ee7196589041556a2f54a7639"},
 ]
 
 [[package]]
@@ -1774,14 +1774,14 @@ files = [
 
 [[package]]
 name = "pydantic"
-version = "2.11.7"
+version = "2.11.9"
 description = "Data validation using Python type hints"
 optional = false
 python-versions = ">=3.9"
 groups = ["main", "dev"]
 files = [
-    {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"},
-    {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"},
+    {file = "pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2"},
+    {file = "pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2"},
 ]
 
 [package.dependencies]
@@ -2396,30 +2396,31 @@ files = [
 
 [[package]]
 name = "ruff"
-version = "0.12.7"
+version = "0.12.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.12.7-py3-none-linux_armv6l.whl", hash = "sha256:76e4f31529899b8c434c3c1dede98c4483b89590e15fb49f2d46183801565303"},
-    {file = "ruff-0.12.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:789b7a03e72507c54fb3ba6209e4bb36517b90f1a3569ea17084e3fd295500fb"},
-    {file = "ruff-0.12.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e1c2a3b8626339bb6369116e7030a4cf194ea48f49b64bb505732a7fce4f4e3"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32dec41817623d388e645612ec70d5757a6d9c035f3744a52c7b195a57e03860"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47ef751f722053a5df5fa48d412dbb54d41ab9b17875c6840a58ec63ff0c247c"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a828a5fc25a3efd3e1ff7b241fd392686c9386f20e5ac90aa9234a5faa12c423"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5726f59b171111fa6a69d82aef48f00b56598b03a22f0f4170664ff4d8298efb"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74e6f5c04c4dd4aba223f4fe6e7104f79e0eebf7d307e4f9b18c18362124bccd"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0bfe4e77fba61bf2ccadf8cf005d6133e3ce08793bbe870dd1c734f2699a3e"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06bfb01e1623bf7f59ea749a841da56f8f653d641bfd046edee32ede7ff6c606"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e41df94a957d50083fd09b916d6e89e497246698c3f3d5c681c8b3e7b9bb4ac8"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4000623300563c709458d0ce170c3d0d788c23a058912f28bbadc6f905d67afa"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:69ffe0e5f9b2cf2b8e289a3f8945b402a1b19eff24ec389f45f23c42a3dd6fb5"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a07a5c8ffa2611a52732bdc67bf88e243abd84fe2d7f6daef3826b59abbfeda4"},
-    {file = "ruff-0.12.7-py3-none-win32.whl", hash = "sha256:c928f1b2ec59fb77dfdf70e0419408898b63998789cc98197e15f560b9e77f77"},
-    {file = "ruff-0.12.7-py3-none-win_amd64.whl", hash = "sha256:9c18f3d707ee9edf89da76131956aba1270c6348bfee8f6c647de841eac7194f"},
-    {file = "ruff-0.12.7-py3-none-win_arm64.whl", hash = "sha256:dfce05101dbd11833a0776716d5d1578641b7fddb537fe7fa956ab85d1769b69"},
-    {file = "ruff-0.12.7.tar.gz", hash = "sha256:1fc3193f238bc2d7968772c82831a4ff69252f673be371fb49663f0068b7ec71"},
+    {file = "ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b"},
+    {file = "ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1"},
+    {file = "ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b"},
+    {file = "ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266"},
+    {file = "ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e"},
+    {file = "ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc"},
+    {file = "ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9"},
 ]
 
 [[package]]
@@ -2746,14 +2747,14 @@ files = [
 
 [[package]]
 name = "towncrier"
-version = "24.8.0"
+version = "25.8.0"
 description = "Building newsfiles for your project."
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"},
-    {file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"},
+    {file = "towncrier-25.8.0-py3-none-any.whl", hash = "sha256:b953d133d98f9aeae9084b56a3563fd2519dfc6ec33f61c9cd2c61ff243fb513"},
+    {file = "towncrier-25.8.0.tar.gz", hash = "sha256:eef16d29f831ad57abb3ae32a0565739866219f1ebfbdd297d32894eb9940eb1"},
 ]
 
 [package.dependencies]
@@ -2877,14 +2878,14 @@ twisted = "*"
 
 [[package]]
 name = "types-bleach"
-version = "6.2.0.20250514"
+version = "6.2.0.20250809"
 description = "Typing stubs for bleach"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_bleach-6.2.0.20250514-py3-none-any.whl", hash = "sha256:380cb74f0db1e3c3b2e0cde217221108e975e07e95ef0970c9d41f7cd4e8ea3c"},
-    {file = "types_bleach-6.2.0.20250514.tar.gz", hash = "sha256:38c2e51d9cac51dc70c1b66121a11f4dad8bbf47fbad494bb7a77d8b8f3c4323"},
+    {file = "types_bleach-6.2.0.20250809-py3-none-any.whl", hash = "sha256:0b372a75117947d9ac8a31ae733fd0f8d92ec75c4772e7b37093ba3fa5b48fb9"},
+    {file = "types_bleach-6.2.0.20250809.tar.gz", hash = "sha256:188d7a1119f6c953140b513ed57ba4213755695815472c19d0c22ac09c79b90b"},
 ]
 
 [package.dependencies]
@@ -2919,14 +2920,14 @@ files = [
 
 [[package]]
 name = "types-jsonschema"
-version = "4.25.0.20250720"
+version = "4.25.1.20250822"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_jsonschema-4.25.0.20250720-py3-none-any.whl", hash = "sha256:7d7897c715310d8bf9ae27a2cedba78bbb09e4cad83ce06d2aa79b73a88941df"},
-    {file = "types_jsonschema-4.25.0.20250720.tar.gz", hash = "sha256:765a3b6144798fe3161fd8cbe570a756ed3e8c0e5adb7c09693eb49faad39dbd"},
+    {file = "types_jsonschema-4.25.1.20250822-py3-none-any.whl", hash = "sha256:f82c2d7fa1ce1c0b84ba1de4ed6798469768188884db04e66421913a4e181294"},
+    {file = "types_jsonschema-4.25.1.20250822.tar.gz", hash = "sha256:aac69ed4b23f49aaceb7fcb834141d61b9e4e6a7f6008cb2f0d3b831dfa8464a"},
 ]
 
 [package.dependencies]
@@ -2970,14 +2971,14 @@ files = [
 
 [[package]]
 name = "types-psycopg2"
-version = "2.9.21.20250718"
+version = "2.9.21.20250915"
 description = "Typing stubs for psycopg2"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_psycopg2-2.9.21.20250718-py3-none-any.whl", hash = "sha256:bcf085d4293bda48f5943a46dadf0389b2f98f7e8007722f7e1c12ee0f541858"},
-    {file = "types_psycopg2-2.9.21.20250718.tar.gz", hash = "sha256:dc09a97272ef67e739e57b9f4740b761208f4514257e311c0b05c8c7a37d04b4"},
+    {file = "types_psycopg2-2.9.21.20250915-py3-none-any.whl", hash = "sha256:eefe5ccdc693fc086146e84c9ba437bb278efe1ef330b299a0cb71169dc6c55f"},
+    {file = "types_psycopg2-2.9.21.20250915.tar.gz", hash = "sha256:bfeb8f54c32490e7b5edc46215ab4163693192bc90407b4a023822de9239f5c8"},
 ]
 
 [[package]]
@@ -3010,14 +3011,14 @@ files = [
 
 [[package]]
 name = "types-requests"
-version = "2.32.4.20250611"
+version = "2.32.4.20250809"
 description = "Typing stubs for requests"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"},
-    {file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"},
+    {file = "types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163"},
+    {file = "types_requests-2.32.4.20250809.tar.gz", hash = "sha256:d8060de1c8ee599311f56ff58010fb4902f462a1470802cf9f6ed27bc46c4df3"},
 ]
 
 [package.dependencies]
@@ -3025,14 +3026,14 @@ urllib3 = ">=2"
 
 [[package]]
 name = "types-setuptools"
-version = "80.9.0.20250809"
+version = "80.9.0.20250822"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_setuptools-80.9.0.20250809-py3-none-any.whl", hash = "sha256:7c6539b4c7ac7b4ab4db2be66d8a58fb1e28affa3ee3834be48acafd94f5976a"},
-    {file = "types_setuptools-80.9.0.20250809.tar.gz", hash = "sha256:e986ba37ffde364073d76189e1d79d9928fb6f5278c7d07589cde353d0218864"},
+    {file = "types_setuptools-80.9.0.20250822-py3-none-any.whl", hash = "sha256:53bf881cb9d7e46ed12c76ef76c0aaf28cfe6211d3fab12e0b83620b1a8642c3"},
+    {file = "types_setuptools-80.9.0.20250822.tar.gz", hash = "sha256:070ea7716968ec67a84c7f7768d9952ff24d28b65b6594797a464f1b3066f965"},
 ]
 
 [[package]]
@@ -3255,4 +3256,4 @@ url-preview = ["lxml"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.9.0"
-content-hash = "600a349d08dde732df251583094a121b5385eb43ae0c6ceff10dcf9749359446"
+content-hash = "2e8ea085e1a0c6f0ac051d4bc457a96827d01f621b1827086de01a5ffa98cf79"

@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.136.0"
+version = "1.138.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -324,7 +324,7 @@ all = [
 # failing on new releases. Keeping lower bounds loose here means that dependabot
 # can bump versions without having to update the content-hash in the lockfile.
 # This helps prevent merge conflicts when running a batch of dependabot updates.
-ruff = "0.12.7"
+ruff = "0.12.10"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"

@@ -289,10 +289,10 @@ pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
     default_enabled: true,
 }];
 
-pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
+pub const BASE_APPEND_POSTCONTENT_RULES: &[PushRule] = &[
     PushRule {
-        rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.unsubscribed_thread"),
-        priority_class: 1,
+        rule_id: Cow::Borrowed("global/postcontent/.io.element.msc4306.rule.unsubscribed_thread"),
+        priority_class: 6,
         conditions: Cow::Borrowed(&[Condition::Known(
             KnownCondition::Msc4306ThreadSubscription { subscribed: false },
         )]),
@@ -301,8 +301,8 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
         default_enabled: true,
     },
     PushRule {
-        rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.subscribed_thread"),
-        priority_class: 1,
+        rule_id: Cow::Borrowed("global/postcontent/.io.element.msc4306.rule.subscribed_thread"),
+        priority_class: 6,
         conditions: Cow::Borrowed(&[Condition::Known(
             KnownCondition::Msc4306ThreadSubscription { subscribed: true },
         )]),
@@ -310,6 +310,9 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+];
+
+pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
     PushRule {
         rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
         priority_class: 1,
@@ -726,6 +729,7 @@ lazy_static! {
         .iter()
         .chain(BASE_APPEND_OVERRIDE_RULES.iter())
         .chain(BASE_APPEND_CONTENT_RULES.iter())
+        .chain(BASE_APPEND_POSTCONTENT_RULES.iter())
         .chain(BASE_APPEND_UNDERRIDE_RULES.iter())
         .map(|rule| { (&*rule.rule_id, rule) })
         .collect();
 
@@ -527,6 +527,7 @@ impl PushRules {
         .chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
         .chain(self.content.iter())
         .chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
+        .chain(base_rules::BASE_APPEND_POSTCONTENT_RULES.iter())
         .chain(self.room.iter())
         .chain(self.sender.iter())
         .chain(self.underride.iter())

@@ -1,5 +1,5 @@
 $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
-$id: https://element-hq.github.io/synapse/schema/synapse/v1.136/synapse-config.schema.json
+$id: https://element-hq.github.io/synapse/schema/synapse/v1.138/synapse-config.schema.json
 type: object
 properties:
   modules:
@@ -2415,8 +2415,15 @@ properties:
       A list of media upload limits defining how much data a given user can
       upload in a given time period.
 
       These limits are applied in addition to the `max_upload_size` limit above
      (which applies to individual uploads).
 
       An empty list means no limits are applied.
 
+      These settings can be overridden using the `get_media_upload_limits_for_user`
+      module API [callback](../../modules/media_repository_callbacks.md#get_media_upload_limits_for_user).
     default: []
     items:
       time_period:

@@ -230,6 +230,7 @@ test_packages=(
     ./tests/msc3967
     ./tests/msc4140
     ./tests/msc4155
+    ./tests/msc4306
 )
 
 # Enable dirty runs, so tests will reuse the same container where possible.

@@ -153,9 +153,13 @@ def get_registered_paths_for_default(
     """
 
     hs = MockHomeserver(base_config, worker_app)
 
     # TODO We only do this to avoid an error, but don't need the database etc
     hs.setup()
-    return get_registered_paths_for_hs(hs)
+    registered_paths = get_registered_paths_for_hs(hs)
+    hs.cleanup()
+
+    return registered_paths
 
 
 def elide_http_methods_if_unconflicting(

@@ -99,6 +99,7 @@ from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor
 from synapse.util import SYNAPSE_VERSION, Clock
+from synapse.util.stringutils import random_string
 
 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -323,6 +324,7 @@ class MockHomeserver:
         self.config = config
         self.hostname = config.server.server_name
         self.version_string = SYNAPSE_VERSION
+        self.instance_id = random_string(5)
 
     def get_clock(self) -> Clock:
         return self.clock
@@ -330,6 +332,9 @@ class MockHomeserver:
     def get_reactor(self) -> ISynapseReactor:
         return reactor
 
+    def get_instance_id(self) -> str:
+        return self.instance_id
+
     def get_instance_name(self) -> str:
         return "master"
 
@@ -685,7 +690,15 @@ class Porter:
         )
         prepare_database(db_conn, engine, config=self.hs_config)
         # Type safety: ignore that we're using Mock homeservers here.
-        store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)  # type: ignore[arg-type]
+        store = Store(
+            DatabasePool(
+                hs,  # type: ignore[arg-type]
+                db_config,
+                engine,
+            ),
+            db_conn,
+            hs,  # type: ignore[arg-type]
+        )
         db_conn.commit()
 
         return store

@@ -120,6 +120,13 @@ def main() -> None:
     # DB.
     hs.setup()
 
+    # This will cause all of the relevant storage classes to be instantiated and call
+    # `register_background_update_handler(...)`,
+    # `register_background_index_update(...)`,
+    # `register_background_validate_constraint(...)`, etc so they are available to use
+    # if we are asked to run those background updates.
+    hs.get_storage_controllers()
+
     if args.run_background_updates:
         run_background_updates(hs)

@@ -13,7 +13,7 @@
 #
 #
 import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Set
 from urllib.parse import urlencode
 
 from synapse._pydantic_compat import (
@@ -57,8 +57,10 @@ logger = logging.getLogger(__name__)
 
 # Scope as defined by MSC2967
 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967
-SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
-SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
+UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
+STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"
 
 
 class ServerMetadata(BaseModel):
@@ -334,7 +336,10 @@ class MasDelegatedAuth(BaseAuth):
         scope = introspection_result.get_scope_set()
 
         # Determine type of user based on presence of particular scopes
-        if SCOPE_MATRIX_API not in scope:
+        if (
+            UNSTABLE_SCOPE_MATRIX_API not in scope
+            and STABLE_SCOPE_MATRIX_API not in scope
+        ):
             raise InvalidClientTokenError(
                 "Token doesn't grant access to the Matrix C-S API"
             )
@@ -366,11 +371,12 @@ class MasDelegatedAuth(BaseAuth):
         # We only allow a single device_id in the scope, so we find them all in the
         # scope list, and raise if there are more than one. The OIDC server should be
         # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
-        device_ids = [
-            tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
-            for tok in scope
-            if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
-        ]
+        device_ids: Set[str] = set()
+        for tok in scope:
+            if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
+                device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
+            elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
+                device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
 
         if len(device_ids) > 1:
             raise AuthError(
@@ -378,7 +384,7 @@ class MasDelegatedAuth(BaseAuth):
                 "Multiple device IDs in scope",
             )
 
-        device_id = device_ids[0] if device_ids else None
+        device_id = next(iter(device_ids), None)
 
         if device_id is not None:
             # Sanity check the device_id
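
As a standalone illustration of the device-scope parsing above (the constants are
copied from the diff; the example token scope and the `ValueError` stand-in for
`AuthError` are hypothetical):

```python
from typing import Optional, Set

UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"


def parse_device_id(scope: Set[str]) -> Optional[str]:
    # Collect device IDs from both the stable and unstable scope forms.
    device_ids: Set[str] = set()
    for tok in scope:
        if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
            device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
        elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
            device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
    if len(device_ids) > 1:
        raise ValueError("Multiple device IDs in scope")
    return next(iter(device_ids), None)


# A token granting the stable API scope plus one device:
assert parse_device_id(
    {"urn:matrix:client:api:*", "urn:matrix:client:device:ABCDEFGH"}
) == "ABCDEFGH"
```

Using a set rather than a list means a token that carries both the stable and unstable
form of the *same* device ID still yields a single entry, so it is not rejected as
"multiple device IDs".
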
@@ -20,7 +20,7 @@
 #
 import logging
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set
 from urllib.parse import urlencode
 
 from authlib.oauth2 import ClientAuth
@@ -34,7 +34,6 @@ from synapse.api.errors import (
     AuthError,
     HttpResponseException,
     InvalidClientTokenError,
-    OAuthInsufficientScopeError,
     SynapseError,
     UnrecognizedRequestError,
 )
@@ -63,9 +62,10 @@ logger = logging.getLogger(__name__)
 
 # Scope as defined by MSC2967
 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967
-SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
-SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"
-SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
+UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
+STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"
 
 # Scope which allows access to the Synapse admin API
 SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"
@@ -444,9 +444,6 @@ class MSC3861DelegatedAuth(BaseAuth):
         if not self._is_access_token_the_admin_token(access_token):
             await self._record_request(request, requester)
 
-        if not allow_guest and requester.is_guest:
-            raise OAuthInsufficientScopeError([SCOPE_MATRIX_API])
-
         request.requester = requester
 
         return requester
@@ -528,10 +525,11 @@ class MSC3861DelegatedAuth(BaseAuth):
         scope: List[str] = introspection_result.get_scope_list()
 
         # Determine type of user based on presence of particular scopes
-        has_user_scope = SCOPE_MATRIX_API in scope
-        has_guest_scope = SCOPE_MATRIX_GUEST in scope
+        has_user_scope = (
+            UNSTABLE_SCOPE_MATRIX_API in scope or STABLE_SCOPE_MATRIX_API in scope
+        )
 
-        if not has_user_scope and not has_guest_scope:
+        if not has_user_scope:
             raise InvalidClientTokenError("No scope in token granting user rights")
 
         # Match via the sub claim
@@ -579,11 +577,12 @@ class MSC3861DelegatedAuth(BaseAuth):
         # We only allow a single device_id in the scope, so we find them all in the
         # scope list, and raise if there are more than one. The OIDC server should be
         # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
-        device_ids = [
-            tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
-            for tok in scope
-            if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
-        ]
+        device_ids: Set[str] = set()
+        for tok in scope:
+            if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
+                device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
+            elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
+                device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
 
         if len(device_ids) > 1:
             raise AuthError(
@@ -591,7 +590,7 @@ class MSC3861DelegatedAuth(BaseAuth):
                 "Multiple device IDs in scope",
             )
 
-        device_id = device_ids[0] if device_ids else None
+        device_id = next(iter(device_ids), None)
 
         if device_id is not None:
             # Sanity check the device_id
@@ -617,5 +616,4 @@ class MSC3861DelegatedAuth(BaseAuth):
             user_id=user_id,
             device_id=device_id,
             scope=scope,
-            is_guest=(has_guest_scope and not has_user_scope),
         )

@@ -72,7 +72,7 @@ from synapse.events.auto_accept_invites import InviteAutoAccepter
 from synapse.events.presence_router import load_legacy_presence_router
 from synapse.handlers.auth import load_legacy_password_auth_providers
 from synapse.http.site import SynapseSite
-from synapse.logging.context import PreserveLoggingContext
+from synapse.logging.context import LoggingContext, PreserveLoggingContext
 from synapse.logging.opentracing import init_tracer
 from synapse.metrics import install_gc_manager, register_threadpool
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -183,25 +183,23 @@ def start_reactor(
         if gc_thresholds:
             gc.set_threshold(*gc_thresholds)
         install_gc_manager()
-        run_command()
-
-    # make sure that we run the reactor with the sentinel log context,
-    # otherwise other PreserveLoggingContext instances will get confused
-    # and complain when they see the logcontext arbitrarily swapping
-    # between the sentinel and `run` logcontexts.
-    #
-    # We also need to drop the logcontext before forking if we're daemonizing,
-    # otherwise the cputime metrics get confused about the per-thread resource usage
-    # appearing to go backwards.
-    with PreserveLoggingContext():
-        if daemonize:
-            assert pid_file is not None
-
-            if print_pidfile:
-                print(pid_file)
-
-            daemonize_process(pid_file, logger)
-        run()
+
+        # Reset the logging context when we start the reactor (whenever we yield control
+        # to the reactor, the `sentinel` logging context needs to be set so we don't
+        # leak the current logging context and erroneously apply it to the next task the
+        # reactor event loop picks up)
+        with PreserveLoggingContext():
+            run_command()
+
+    if daemonize:
+        assert pid_file is not None
+
+        if print_pidfile:
+            print(pid_file)
+
+        daemonize_process(pid_file, logger)
+
+    run()
 
 
 def quit_with_error(error_string: str) -> NoReturn:
@@ -601,18 +599,38 @@ async def start(hs: "HomeServer") -> None:
     hs.get_datastores().main.db_pool.start_profiling()
     hs.get_pusherpool().start()
 
-    hs.get_reactor().addSystemEventTrigger(
-        "before", "shutdown", logger.info, "Shutting down..."
-    )
+    def log_shutdown() -> None:
+        with LoggingContext("log_shutdown"):
+            logger.info("Shutting down...")
+
+    # Log when we start the shut down process.
+    hs.get_reactor().addSystemEventTrigger("before", "shutdown", log_shutdown)
 
     setup_sentry(hs)
     setup_sdnotify(hs)
 
-    # If background tasks are running on the main process or this is the worker in
-    # charge of them, start collecting the phone home stats and shared usage metrics.
+    # Register background tasks required by this server. This must be done
+    # somewhat manually due to the background tasks not being registered
+    # unless handlers are instantiated.
+    #
+    # While we could "start" these before the reactor runs, nothing will happen until
+    # the reactor is running, so we may as well do it here in `start`.
+    #
+    # Additionally, this means we also start them after we daemonize and fork the
+    # process which means we can avoid any potential problems with cputime metrics
+    # getting confused about the per-thread resource usage appearing to go backwards
+    # because we're comparing the resource usage (`rusage`) from the original process to
+    # the forked process.
     if hs.config.worker.run_background_tasks:
         hs.start_background_tasks()
 
+    # TODO: This should be moved to the same pattern we use for other background tasks:
+    # Add to `REQUIRED_ON_BACKGROUND_TASK_STARTUP` and rely on
+    # `start_background_tasks` to start it.
     await hs.get_common_usage_metrics_manager().setup()
 
+    # TODO: This feels like another pattern that should be refactored as one of the
+    # `REQUIRED_ON_BACKGROUND_TASK_STARTUP`
     start_phone_stats_home(hs)
 
     # We now freeze all allocated objects in the hopes that (almost)

@@ -355,7 +355,12 @@ def start(config_options: List[str]) -> None:
     except Exception as e:
         handle_startup_exception(e)
 
-    register_start(_base.start, hs)
+    async def start() -> None:
+        # Re-establish log context now that we're back from the reactor
+        with LoggingContext("start"):
+            await _base.start(hs)
+
+    register_start(start)
 
     # redirect stdio to the logs, if configured.
     if not hs.config.logging.no_redirect_stdio:

@@ -377,15 +377,17 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
         handle_startup_exception(e)
 
     async def start() -> None:
-        # Load the OIDC provider metadatas, if OIDC is enabled.
-        if hs.config.oidc.oidc_enabled:
-            oidc = hs.get_oidc_handler()
-            # Loading the provider metadata also ensures the provider config is valid.
-            await oidc.load_metadata()
+        # Re-establish log context now that we're back from the reactor
+        with LoggingContext("start"):
+            # Load the OIDC provider metadatas, if OIDC is enabled.
+            if hs.config.oidc.oidc_enabled:
+                oidc = hs.get_oidc_handler()
+                # Loading the provider metadata also ensures the provider config is valid.
+                await oidc.load_metadata()
 
-        await _base.start(hs)
+            await _base.start(hs)
 
-        hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
+            hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
 
     register_start(start)

@@ -22,6 +22,7 @@
 
 import argparse
 import errno
+import importlib.resources as importlib_resources
 import logging
 import os
 import re
@@ -46,7 +47,6 @@ from typing import (
 
 import attr
 import jinja2
-import pkg_resources
 import yaml
 
 from synapse.types import StrSequence
@@ -174,8 +174,8 @@ class Config:
         self.root = root_config
 
         # Get the path to the default Synapse template directory
-        self.default_template_dir = pkg_resources.resource_filename(
-            "synapse", "res/templates"
+        self.default_template_dir = str(
+            importlib_resources.files("synapse").joinpath("res").joinpath("templates")
         )
 
     @staticmethod

@@ -590,5 +590,5 @@ class ExperimentalConfig(Config):
         self.msc4293_enabled: bool = experimental.get("msc4293_enabled", False)
 
         # MSC4306: Thread Subscriptions
-        # (and MSC4308: sliding sync extension for thread subscriptions)
+        # (and MSC4308: Thread Subscriptions extension to Sliding Sync)
         self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False)

@@ -18,13 +18,13 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
+import importlib.resources as importlib_resources
 import json
 import re
 from typing import Any, Dict, Iterable, List, Optional, Pattern
 from urllib import parse as urlparse
 
 import attr
-import pkg_resources
 
 from synapse.types import JsonDict, StrSequence
 
@@ -64,7 +64,12 @@ class OembedConfig(Config):
         """
         # Whether to use the packaged providers.json file.
         if not oembed_config.get("disable_default_providers") or False:
-            with pkg_resources.resource_stream("synapse", "res/providers.json") as s:
+            path = (
+                importlib_resources.files("synapse")
+                .joinpath("res")
+                .joinpath("providers.json")
+            )
+            with path.open("r", encoding="utf-8") as s:
                 providers = json.load(s)
 
             yield from self._parse_and_validate_provider(

@@ -120,11 +120,19 @@ def parse_thumbnail_requirements(
 
 @attr.s(auto_attribs=True, slots=True, frozen=True)
 class MediaUploadLimit:
-    """A limit on the amount of data a user can upload in a given time
-    period."""
+    """
+    Represents a limit on the amount of data a user can upload in a given time
+    period.
+
+    These can be configured through the `media_upload_limits` [config option](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#media_upload_limits)
+    or via the `get_media_upload_limits_for_user` module API [callback](https://element-hq.github.io/synapse/latest/modules/media_repository_callbacks.html#get_media_upload_limits_for_user).
+    """
 
     max_bytes: int
     """The maximum number of bytes that can be uploaded in the given time period."""
 
     time_period_ms: int
     """The time period in milliseconds."""
 
 
 class ContentRepositoryConfig(Config):
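
For a quick feel for the frozen attrs class above, constructing it looks like this
(the values and the import via the module API re-export are illustrative assumptions):

```python
from synapse.module_api import MediaUploadLimit

# One gigabyte per week (values are illustrative only).
weekly_limit = MediaUploadLimit(
    max_bytes=1024**3,
    time_period_ms=7 * 24 * 60 * 60 * 1000,
)
```
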
@@ -306,6 +306,12 @@ class EventContext(UnpersistedEventContextBase):
         )
 
 
+EventPersistencePair = Tuple[EventBase, EventContext]
+"""
+The combination of an event to be persisted and its context.
+"""
+
+
 @attr.s(slots=True, auto_attribs=True)
 class UnpersistedEventContext(UnpersistedEventContextBase):
     """
@@ -363,7 +369,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
         room_id: str,
         last_known_state_group: int,
         datastore: "StateGroupDataStore",
-    ) -> List[Tuple[EventBase, EventContext]]:
+    ) -> List[EventPersistencePair]:
         """
         Takes a list of events and their associated unpersisted contexts and persists
         the unpersisted contexts, returning a list of events and persisted contexts.

@@ -59,7 +59,7 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.events import EventBase
-from synapse.events.snapshot import EventContext
+from synapse.events.snapshot import EventPersistencePair
 from synapse.federation.federation_base import (
     FederationBase,
     InvalidEventSignatureError,
@@ -914,7 +914,7 @@ class FederationServer(FederationBase):
 
     async def _on_send_membership_event(
         self, origin: str, content: JsonDict, membership_type: str, room_id: str
-    ) -> Tuple[EventBase, EventContext]:
+    ) -> EventPersistencePair:
         """Handle an on_send_{join,leave,knock} request
 
         Does some preliminary validation before passing the request on to the

@@ -37,6 +37,7 @@ Events are replicated via a separate events stream.
 """
 
 import logging
+from enum import Enum
 from typing import (
     TYPE_CHECKING,
     Dict,
@@ -67,6 +68,25 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+class QueueNames(str, Enum):
+    PRESENCE_MAP = "presence_map"
+    KEYED_EDU = "keyed_edu"
+    KEYED_EDU_CHANGED = "keyed_edu_changed"
+    EDUS = "edus"
+    POS_TIME = "pos_time"
+    PRESENCE_DESTINATIONS = "presence_destinations"
+
+
+queue_name_to_gauge_map: Dict[QueueNames, LaterGauge] = {}
+
+for queue_name in QueueNames:
+    queue_name_to_gauge_map[queue_name] = LaterGauge(
+        name=f"synapse_federation_send_queue_{queue_name.value}_size",
+        desc="",
+        labelnames=[SERVER_NAME_LABEL],
+    )
+
+
 class FederationRemoteSendQueue(AbstractFederationSender):
     """A drop in replacement for FederationSender"""
 
@@ -111,23 +131,16 @@ class FederationRemoteSendQueue(AbstractFederationSender):
         # we make a new function, so we need to make a new function so the inner
         # lambda binds to the queue rather than to the name of the queue which
         # changes. ARGH.
-        def register(name: str, queue: Sized) -> None:
-            LaterGauge(
-                name="synapse_federation_send_queue_%s_size" % (queue_name,),
-                desc="",
-                labelnames=[SERVER_NAME_LABEL],
-                caller=lambda: {(self.server_name,): len(queue)},
+        def register(queue_name: QueueNames, queue: Sized) -> None:
+            queue_name_to_gauge_map[queue_name].register_hook(
+                homeserver_instance_id=hs.get_instance_id(),
+                hook=lambda: {(self.server_name,): len(queue)},
             )
 
-        for queue_name in [
-            "presence_map",
-            "keyed_edu",
-            "keyed_edu_changed",
-            "edus",
-            "pos_time",
-            "presence_destinations",
-        ]:
-            register(queue_name, getattr(self, queue_name))
+        for queue_name in QueueNames:
+            queue = getattr(self, queue_name.value)
+            assert isinstance(queue, Sized)
+            register(queue_name, queue=queue)
 
         self.clock.looping_call(self._clear_queue, 30 * 1000)
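
The refactor above (and the parallel ones in the federation sender and presence
handler below) all follow the same pattern: declare the `LaterGauge` once at module
level, then register a per-homeserver hook at construction time. A minimal sketch of
that pattern, with the import path and the `Example` class being assumptions based on
the code shown in this changeset:

```python
from synapse.metrics import SERVER_NAME_LABEL, LaterGauge

# Declared once at import time; the gauge name here is hypothetical.
my_queue_size_gauge = LaterGauge(
    name="synapse_example_my_queue_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)


class Example:
    def __init__(self, hs):
        self.server_name = hs.hostname
        self.my_queue = []
        # Each homeserver instance registers its own hook against the shared
        # gauge, keyed by instance ID (hence the new MockHomeserver.get_instance_id).
        my_queue_size_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.my_queue)},
        )
```

Moving gauge creation to module level avoids re-registering the same Prometheus metric
every time a homeserver object is constructed (e.g. across tests or multiple tenants).
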
@@ -150,6 +150,7 @@ from prometheus_client import Counter
 from twisted.internet import defer
 
 import synapse.metrics
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase
 from synapse.federation.sender.per_destination_queue import (
@@ -199,6 +200,24 @@ sent_pdus_destination_dist_total = Counter(
     labelnames=[SERVER_NAME_LABEL],
 )
 
+transaction_queue_pending_destinations_gauge = LaterGauge(
+    name="synapse_federation_transaction_queue_pending_destinations",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
+transaction_queue_pending_pdus_gauge = LaterGauge(
+    name="synapse_federation_transaction_queue_pending_pdus",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
+transaction_queue_pending_edus_gauge = LaterGauge(
+    name="synapse_federation_transaction_queue_pending_edus",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
 # Time (in s) to wait before trying to wake up destinations that have
 # catch-up outstanding.
 # Please note that rate limiting still applies, so while the loop is
@@ -398,11 +417,9 @@ class FederationSender(AbstractFederationSender):
         # map from destination to PerDestinationQueue
         self._per_destination_queues: Dict[str, PerDestinationQueue] = {}
 
-        LaterGauge(
-            name="synapse_federation_transaction_queue_pending_destinations",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
+        transaction_queue_pending_destinations_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {
                 (self.server_name,): sum(
                     1
                     for d in self._per_destination_queues.values()
@@ -410,22 +427,17 @@ class FederationSender(AbstractFederationSender):
                 )
             },
         )
 
-        LaterGauge(
-            name="synapse_federation_transaction_queue_pending_pdus",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
+        transaction_queue_pending_pdus_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {
                 (self.server_name,): sum(
                     d.pending_pdu_count() for d in self._per_destination_queues.values()
                 )
             },
        )
-        LaterGauge(
-            name="synapse_federation_transaction_queue_pending_edus",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
+        transaction_queue_pending_edus_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {
                 (self.server_name,): sum(
                     d.pending_edu_count() for d in self._per_destination_queues.values()
                 )
@@ -644,6 +656,31 @@ class FederationSender(AbstractFederationSender):
             )
             return
 
+        # If we've rescinded an invite then we want to tell the
+        # other server.
+        if (
+            event.type == EventTypes.Member
+            and event.membership == Membership.LEAVE
+            and event.sender != event.state_key
+        ):
+            # We check if this leave event is rescinding an invite
+            # by looking if there is an invite event for the user in
+            # the auth events. It could otherwise be a kick or
+            # unban, which we don't want to send (if the user wasn't
+            # already in the room).
+            auth_events = await self.store.get_events_as_list(
+                event.auth_event_ids()
+            )
+            for auth_event in auth_events:
+                if (
+                    auth_event.type == EventTypes.Member
+                    and auth_event.state_key == event.state_key
+                    and auth_event.membership == Membership.INVITE
+                ):
+                    destinations = set(destinations)
+                    destinations.add(get_domain_from_id(event.state_key))
+                    break
+
         sharded_destinations = {
             d
             for d in destinations

@@ -26,7 +26,7 @@ from synapse.api.constants import EduTypes
 from synapse.api.errors import HttpResponseException
 from synapse.events import EventBase
 from synapse.federation.persistence import TransactionActions
-from synapse.federation.units import Edu, Transaction
+from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus
 from synapse.logging.opentracing import (
     extract_text_map,
     set_tag,
@@ -119,7 +119,7 @@ class TransactionManager:
             transaction_id=txn_id,
             origin=self.server_name,
             destination=destination,
-            pdus=[p.get_pdu_json() for p in pdus],
+            pdus=serialize_and_filter_pdus(pdus),
            edus=[edu.get_dict() for edu in edus],
         )

@@ -135,7 +135,7 @@ class PublicRoomList(BaseFederationServlet):
         if not self.allow_access:
             raise FederationDeniedError(origin)
 
-        limit = parse_integer_from_args(query, "limit", 0)
+        limit: Optional[int] = parse_integer_from_args(query, "limit", 0)
         since_token = parse_string_from_args(query, "since", None)
         include_all_networks = parse_boolean_from_args(
             query, "include_all_networks", default=False

@@ -215,9 +215,9 @@ class DelayedEventsHandler:
                 "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
             )
 
-            event = await self._store.get_event(
-                delta.event_id, check_room_id=delta.room_id
-            )
+            event = await self._store.get_event(delta.event_id, allow_none=True)
+            if not event:
+                continue
             sender = UserID.from_string(event.sender)
 
             next_send_ts = await self._store.cancel_delayed_state_events(

@@ -66,7 +66,11 @@ from synapse.event_auth import (
     validate_event_for_room_version,
 )
 from synapse.events import EventBase
-from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
+from synapse.events.snapshot import (
+    EventContext,
+    EventPersistencePair,
+    UnpersistedEventContextBase,
+)
 from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
 from synapse.logging.context import nested_logging_context
 from synapse.logging.opentracing import (
@@ -244,9 +248,10 @@ class FederationEventHandler:
             self.room_queues[room_id].append((pdu, origin))
             return
 
-        # If we're not in the room just ditch the event entirely. This is
-        # probably an old server that has come back and thinks we're still in
-        # the room (or we've been rejoined to the room by a state reset).
+        # If we're not in the room just ditch the event entirely (and not
+        # invited). This is probably an old server that has come back and thinks
+        # we're still in the room (or we've been rejoined to the room by a state
+        # reset).
         #
         # Note that if we were never in the room then we would have already
         # dropped the event, since we wouldn't know the room version.
@@ -254,6 +259,43 @@ class FederationEventHandler:
             room_id, self.server_name
         )
         if not is_in_room:
+            # Check if this is a leave event rescinding an invite
+            if (
+                pdu.type == EventTypes.Member
+                and pdu.membership == Membership.LEAVE
+                and pdu.state_key != pdu.sender
+                and self._is_mine_id(pdu.state_key)
+            ):
+                (
+                    membership,
+                    membership_event_id,
+                ) = await self._store.get_local_current_membership_for_user_in_room(
+                    pdu.state_key, pdu.room_id
+                )
+                if (
+                    membership == Membership.INVITE
+                    and membership_event_id
+                    and membership_event_id
+                    in pdu.auth_event_ids()  # The invite should be in the auth events of the rescission.
+                ):
+                    invite_event = await self._store.get_event(
+                        membership_event_id, allow_none=True
+                    )
+
+                    # We cannot fully auth the rescission event, but we can
+                    # check if the sender of the leave event is the same as the
+                    # invite.
+                    #
+                    # Technically, a room admin could rescind the invite, but we
+                    # have no way of knowing who is and isn't a room admin.
+                    if invite_event and pdu.sender == invite_event.sender:
+                        # Handle the rescission event
+                        pdu.internal_metadata.outlier = True
+                        pdu.internal_metadata.out_of_band_membership = True
+                        context = EventContext.for_outlier(self._storage_controllers)
+                        await self.persist_events_and_notify(room_id, [(pdu, context)])
+                        return
+
             logger.info(
                 "Ignoring PDU from %s as we're not in the room",
                 origin,
@@ -341,7 +383,7 @@ class FederationEventHandler:
 
     async def on_send_membership_event(
         self, origin: str, event: EventBase
-    ) -> Tuple[EventBase, EventContext]:
+    ) -> EventPersistencePair:
         """
         We have received a join/leave/knock event for a room via send_join/leave/knock.
 
@@ -1712,7 +1754,7 @@ class FederationEventHandler:
         )
         auth_map.update(persisted_events)
 
-        events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = []
+        events_and_contexts_to_persist: List[EventPersistencePair] = []
 
         async def prep(event: EventBase) -> None:
             with nested_logging_context(suffix=event.event_id):
@@ -2225,7 +2267,7 @@ class FederationEventHandler:
     async def persist_events_and_notify(
         self,
         room_id: str,
-        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
+        event_and_contexts: Sequence[EventPersistencePair],
         backfilled: bool = False,
     ) -> int:
         """Persists events and tells the notifier/pushers about them, if

@@ -57,6 +57,7 @@ from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import (
    EventContext,
    EventPersistencePair,
    UnpersistedEventContext,
    UnpersistedEventContextBase,
)
@@ -1439,7 +1440,7 @@ class EventCreationHandler:
    async def handle_new_client_event(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
        ignore_shadow_ban: bool = False,
@@ -1651,7 +1652,7 @@ class EventCreationHandler:
    async def _persist_events(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:
@@ -1737,7 +1738,7 @@ class EventCreationHandler:
            raise

    async def cache_joined_hosts_for_events(
        self, events_and_context: List[Tuple[EventBase, EventContext]]
        self, events_and_context: List[EventPersistencePair]
    ) -> None:
        """Precalculate the joined hosts at each of the given events, when using Redis, so that
        external federation senders don't have to recalculate it themselves.
@@ -1843,7 +1844,7 @@ class EventCreationHandler:
    async def persist_and_notify_client_events(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:

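
For orientation: judging by these mechanical substitutions, `EventPersistencePair` is presumably a named alias for the tuple it replaces everywhere above. A sketch of the assumed shape (the real definition lives in `synapse.events.snapshot` and is not shown in this diff):

```python
from typing import Tuple

# Assumed shape only -- the actual definition is in synapse.events.snapshot.
EventPersistencePair = Tuple["EventBase", "EventContext"]
```
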
@@ -173,6 +173,18 @@ state_transition_counter = Counter(
    labelnames=["locality", "from", "to", SERVER_NAME_LABEL],
)

presence_user_to_current_state_size_gauge = LaterGauge(
    name="synapse_handlers_presence_user_to_current_state_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

presence_wheel_timer_size_gauge = LaterGauge(
    name="synapse_handlers_presence_wheel_timer_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
@@ -779,11 +791,9 @@ class PresenceHandler(BasePresenceHandler):
            EduTypes.PRESENCE, self.incoming_presence
        )

        LaterGauge(
            name="synapse_handlers_presence_user_to_current_state_size",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self.user_to_current_state)},
        presence_user_to_current_state_size_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.user_to_current_state)},
        )

        # The per-device presence state, maps user to devices to per-device presence state.
@@ -882,11 +892,9 @@ class PresenceHandler(BasePresenceHandler):
            60 * 1000,
        )

        LaterGauge(
            name="synapse_handlers_presence_wheel_timer_size",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self.wheel_timer)},
        presence_wheel_timer_size_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.wheel_timer)},
        )

        # Used to handle sending of presence to newly joined users/servers

@@ -211,7 +211,7 @@ class SlidingSyncHandler:

        Args:
            sync_config: Sync configuration
            to_token: The point in the stream to sync up to.
            to_token: The latest point in the stream to sync up to.
            from_token: The point in the stream to sync from. Token of the end of the
                previous batch. May be `None` if this is the initial sync request.
        """

@@ -27,7 +27,7 @@ from typing import (
    cast,
)

from typing_extensions import assert_never
from typing_extensions import TypeAlias, assert_never

from synapse.api.constants import AccountDataTypes, EduTypes
from synapse.handlers.receipts import ReceiptEventSource
@@ -40,6 +40,7 @@ from synapse.types import (
    SlidingSyncStreamToken,
    StrCollection,
    StreamToken,
    ThreadSubscriptionsToken,
)
from synapse.types.handlers.sliding_sync import (
    HaveSentRoomFlag,
@@ -54,6 +55,13 @@ from synapse.util.async_helpers import (
    gather_optional_coroutines,
)

_ThreadSubscription: TypeAlias = (
    SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadSubscription
)
_ThreadUnsubscription: TypeAlias = (
    SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadUnsubscription
)

if TYPE_CHECKING:
    from synapse.server import HomeServer

@@ -68,6 +76,7 @@ class SlidingSyncExtensionHandler:
        self.event_sources = hs.get_event_sources()
        self.device_handler = hs.get_device_handler()
        self.push_rules_handler = hs.get_push_rules_handler()
        self._enable_thread_subscriptions = hs.config.experimental.msc4306_enabled

    @trace
    async def get_extensions_response(
@@ -93,7 +102,7 @@ class SlidingSyncExtensionHandler:
            actual_room_ids: The actual room IDs in the Sliding Sync response.
            actual_room_response_map: A map of room ID to room results in the
                Sliding Sync response.
            to_token: The point in the stream to sync up to.
            to_token: The latest point in the stream to sync up to.
            from_token: The point in the stream to sync from.
        """

@@ -156,18 +165,32 @@ class SlidingSyncExtensionHandler:
            from_token=from_token,
        )

        thread_subs_coro = None
        if (
            sync_config.extensions.thread_subscriptions is not None
            and self._enable_thread_subscriptions
        ):
            thread_subs_coro = self.get_thread_subscriptions_extension_response(
                sync_config=sync_config,
                thread_subscriptions_request=sync_config.extensions.thread_subscriptions,
                to_token=to_token,
                from_token=from_token,
            )

        (
            to_device_response,
            e2ee_response,
            account_data_response,
            receipts_response,
            typing_response,
            thread_subs_response,
        ) = await gather_optional_coroutines(
            to_device_coro,
            e2ee_coro,
            account_data_coro,
            receipts_coro,
            typing_coro,
            thread_subs_coro,
        )

        return SlidingSyncResult.Extensions(
@@ -176,6 +199,7 @@ class SlidingSyncExtensionHandler:
            account_data=account_data_response,
            receipts=receipts_response,
            typing=typing_response,
            thread_subscriptions=thread_subs_response,
        )

    def find_relevant_room_ids_for_extension(
@@ -877,3 +901,72 @@ class SlidingSyncExtensionHandler:
        return SlidingSyncResult.Extensions.TypingExtension(
            room_id_to_typing_map=room_id_to_typing_map,
        )

    async def get_thread_subscriptions_extension_response(
        self,
        sync_config: SlidingSyncConfig,
        thread_subscriptions_request: SlidingSyncConfig.Extensions.ThreadSubscriptionsExtension,
        to_token: StreamToken,
        from_token: Optional[SlidingSyncStreamToken],
    ) -> Optional[SlidingSyncResult.Extensions.ThreadSubscriptionsExtension]:
        """Handle Thread Subscriptions extension (MSC4308)

        Args:
            sync_config: Sync configuration
            thread_subscriptions_request: The thread_subscriptions extension from the request
            to_token: The point in the stream to sync up to.
            from_token: The point in the stream to sync from.

        Returns:
            the response (None if empty or thread subscriptions are disabled)
        """
        if not thread_subscriptions_request.enabled:
            return None

        limit = thread_subscriptions_request.limit

        if from_token:
            from_stream_id = from_token.stream_token.thread_subscriptions_key
        else:
            from_stream_id = StreamToken.START.thread_subscriptions_key

        to_stream_id = to_token.thread_subscriptions_key

        updates = await self.store.get_latest_updated_thread_subscriptions_for_user(
            user_id=sync_config.user.to_string(),
            from_id=from_stream_id,
            to_id=to_stream_id,
            limit=limit,
        )

        if len(updates) == 0:
            return None

        subscribed_threads: Dict[str, Dict[str, _ThreadSubscription]] = {}
        unsubscribed_threads: Dict[str, Dict[str, _ThreadUnsubscription]] = {}
        for stream_id, room_id, thread_root_id, subscribed, automatic in updates:
            if subscribed:
                subscribed_threads.setdefault(room_id, {})[thread_root_id] = (
                    _ThreadSubscription(
                        automatic=automatic,
                        bump_stamp=stream_id,
                    )
                )
            else:
                unsubscribed_threads.setdefault(room_id, {})[thread_root_id] = (
                    _ThreadUnsubscription(bump_stamp=stream_id)
                )

        prev_batch = None
        if len(updates) == limit:
            # Tell the client about a potential gap where there may be more
            # thread subscriptions for it to backpaginate.
            # We subtract one because the 'later in the stream' bound is inclusive,
            # and we already saw the element at index 0.
            prev_batch = ThreadSubscriptionsToken(updates[0][0] - 1)

        return SlidingSyncResult.Extensions.ThreadSubscriptionsExtension(
            subscribed=subscribed_threads,
            unsubscribed=unsubscribed_threads,
            prev_batch=prev_batch,
        )

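
To make the `prev_batch` arithmetic concrete, a small worked sketch (stream IDs are made up; `updates` is assumed to be the latest `limit` rows, sorted ascending by stream ID, matching the loop above):

```python
# Stream holds thread-subscription updates with stream IDs 1..10; limit is 3,
# so the store returns the latest three rows, ascending by stream ID.
updates = [
    (8, "!room:example.org", "$thread1", True, False),
    (9, "!room:example.org", "$thread2", False, False),
    (10, "!other:example.org", "$thread3", True, True),
]
limit = 3

prev_batch = None
if len(updates) == limit:
    # The response already covers stream ID 8 (index 0), and the backpagination
    # bound is inclusive, so point the token one before it.
    prev_batch = updates[0][0] - 1  # == 7

assert prev_batch == 7
```
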
@@ -20,7 +20,6 @@
#
import itertools
import logging
from enum import Enum
from typing import (
    TYPE_CHECKING,
    AbstractSet,
@@ -28,14 +27,11 @@ from typing import (
    Dict,
    FrozenSet,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
    overload,
)

import attr
@@ -120,25 +116,6 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
SyncRequestKey = Tuple[Any, ...]


class SyncVersion(Enum):
    """
    Enum for specifying the version of sync request. This is used to key which type of
    sync response that we are generating.

    This is different from the `sync_type` you might see used in other code below, which
    specifies the sub-type of sync request (e.g. initial_sync, full_state_sync,
    incremental_sync) and is really only relevant for the `/sync` v2 endpoint.
    """

    # These string values are semantically significant because they are used in the
    # metrics

    # Traditional `/sync` endpoint
    SYNC_V2 = "sync_v2"
    # Part of MSC3575 Sliding Sync
    E2EE_SYNC = "e2ee_sync"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
    user: UserID
@@ -308,26 +285,6 @@ class SyncResult:
    )


@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeSyncResult:
    """
    Attributes:
        next_batch: Token for the next sync
        to_device: List of direct messages for the device.
        device_lists: List of user_ids whose devices have changed
        device_one_time_keys_count: Dict of algorithm to count for one time keys
            for this device
        device_unused_fallback_key_types: List of key types that have an unused fallback
            key
    """

    next_batch: StreamToken
    to_device: List[JsonDict]
    device_lists: DeviceListUpdates
    device_one_time_keys_count: JsonMapping
    device_unused_fallback_key_types: List[str]


class SyncHandler:
    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
@@ -373,52 +330,15 @@ class SyncHandler:

        self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync

    @overload
    async def wait_for_sync_for_user(
        self,
        requester: Requester,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.SYNC_V2],
        request_key: SyncRequestKey,
        since_token: Optional[StreamToken] = None,
        timeout: int = 0,
        full_state: bool = False,
    ) -> SyncResult: ...

    @overload
    async def wait_for_sync_for_user(
        self,
        requester: Requester,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.E2EE_SYNC],
        request_key: SyncRequestKey,
        since_token: Optional[StreamToken] = None,
        timeout: int = 0,
        full_state: bool = False,
    ) -> E2eeSyncResult: ...

    @overload
    async def wait_for_sync_for_user(
        self,
        requester: Requester,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        request_key: SyncRequestKey,
        since_token: Optional[StreamToken] = None,
        timeout: int = 0,
        full_state: bool = False,
    ) -> Union[SyncResult, E2eeSyncResult]: ...

    async def wait_for_sync_for_user(
        self,
        requester: Requester,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        request_key: SyncRequestKey,
        since_token: Optional[StreamToken] = None,
        timeout: int = 0,
        full_state: bool = False,
    ) -> Union[SyncResult, E2eeSyncResult]:
    ) -> SyncResult:
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.
@@ -433,8 +353,7 @@ class SyncHandler:
            full_state: Whether to return the full state for each room.

        Returns:
            When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
            When `SyncVersion.E2EE_SYNC`, returns an `E2eeSyncResult`.
            returns a full `SyncResult`.
        """
        # If the user is not part of the mau group, then check that limits have
        # not been exceeded (if not part of the group by this point, almost certain
@@ -446,7 +365,6 @@ class SyncHandler:
            request_key,
            self._wait_for_sync_for_user,
            sync_config,
            sync_version,
            since_token,
            timeout,
            full_state,
@@ -455,48 +373,14 @@ class SyncHandler:
        logger.debug("Returning sync response for %s", user_id)
        return res

    @overload
    async def _wait_for_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.SYNC_V2],
        since_token: Optional[StreamToken],
        timeout: int,
        full_state: bool,
        cache_context: ResponseCacheContext[SyncRequestKey],
    ) -> SyncResult: ...

    @overload
    async def _wait_for_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.E2EE_SYNC],
        since_token: Optional[StreamToken],
        timeout: int,
        full_state: bool,
        cache_context: ResponseCacheContext[SyncRequestKey],
    ) -> E2eeSyncResult: ...

    @overload
    async def _wait_for_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        since_token: Optional[StreamToken],
        timeout: int,
        full_state: bool,
        cache_context: ResponseCacheContext[SyncRequestKey],
    ) -> Union[SyncResult, E2eeSyncResult]: ...

    async def _wait_for_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        since_token: Optional[StreamToken],
        timeout: int,
        full_state: bool,
        cache_context: ResponseCacheContext[SyncRequestKey],
    ) -> Union[SyncResult, E2eeSyncResult]:
    ) -> SyncResult:
        """The start of the machinery that produces a /sync response.

        See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
@@ -517,7 +401,7 @@ class SyncHandler:
        else:
            sync_type = "incremental_sync"

        sync_label = f"{sync_version}:{sync_type}"
        sync_label = f"sync_v2:{sync_type}"

        context = current_context()
        if context:
@@ -578,19 +462,15 @@ class SyncHandler:
        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result: Union[
                SyncResult, E2eeSyncResult
            ] = await self.current_sync_for_user(
                sync_config, sync_version, since_token, full_state=full_state
            result = await self.current_sync_for_user(
                sync_config, since_token, full_state=full_state
            )
        else:
            # Otherwise, we wait for something to happen and report it to the user.
            async def current_sync_callback(
                before_token: StreamToken, after_token: StreamToken
            ) -> Union[SyncResult, E2eeSyncResult]:
                return await self.current_sync_for_user(
                    sync_config, sync_version, since_token
                )
            ) -> SyncResult:
                return await self.current_sync_for_user(sync_config, since_token)

            result = await self.notifier.wait_for_events(
                sync_config.user.to_string(),
@@ -623,43 +503,15 @@ class SyncHandler:

        return result

    @overload
    async def current_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.SYNC_V2],
        since_token: Optional[StreamToken] = None,
        full_state: bool = False,
    ) -> SyncResult: ...

    @overload
    async def current_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: Literal[SyncVersion.E2EE_SYNC],
        since_token: Optional[StreamToken] = None,
        full_state: bool = False,
    ) -> E2eeSyncResult: ...

    @overload
    async def current_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        since_token: Optional[StreamToken] = None,
        full_state: bool = False,
    ) -> Union[SyncResult, E2eeSyncResult]: ...

    async def current_sync_for_user(
        self,
        sync_config: SyncConfig,
        sync_version: SyncVersion,
        since_token: Optional[StreamToken] = None,
        full_state: bool = False,
    ) -> Union[SyncResult, E2eeSyncResult]:
    ) -> SyncResult:
        """
        Generates the response body of a sync result, represented as a
        `SyncResult`/`E2eeSyncResult`.
        `SyncResult`.

        This is a wrapper around `generate_sync_result` which starts an open tracing
        span to track the sync. See `generate_sync_result` for the next part of your
@@ -672,28 +524,15 @@ class SyncHandler:
            full_state: Whether to return the full state for each room.

        Returns:
            When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
            When `SyncVersion.E2EE_SYNC`, returns an `E2eeSyncResult`.
            returns a full `SyncResult`.
        """
        with start_active_span("sync.current_sync_for_user"):
            log_kv({"since_token": since_token})

            # Go through the `/sync` v2 path
            if sync_version == SyncVersion.SYNC_V2:
                sync_result: Union[
                    SyncResult, E2eeSyncResult
                ] = await self.generate_sync_result(
                    sync_config, since_token, full_state
                )
            # Go through the MSC3575 Sliding Sync `/sync/e2ee` path
            elif sync_version == SyncVersion.E2EE_SYNC:
                sync_result = await self.generate_e2ee_sync_result(
                    sync_config, since_token
                )
            else:
                raise Exception(
                    f"Unknown sync_version (this is a Synapse problem): {sync_version}"
                )
            sync_result = await self.generate_sync_result(
                sync_config, since_token, full_state
            )

            set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
            return sync_result
@@ -1968,102 +1807,6 @@ class SyncHandler:
            next_batch=sync_result_builder.now_token,
        )

    async def generate_e2ee_sync_result(
        self,
        sync_config: SyncConfig,
        since_token: Optional[StreamToken] = None,
    ) -> E2eeSyncResult:
        """
        Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.

        This is represented by an `E2eeSyncResult` struct, which is built from small
        pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
        mutable ("inout") parameter to various helper functions. These retrieve and
        process the data which forms the sync body, often writing to the
        `sync_result_builder` to store their output.

        At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
        instance to signify that the sync calculation is complete.
        """
        user_id = sync_config.user.to_string()
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service:
            # We no longer support AS users using /sync directly.
            # See https://github.com/matrix-org/matrix-doc/issues/1144
            raise NotImplementedError()

        sync_result_builder = await self.get_sync_result_builder(
            sync_config,
            since_token,
            full_state=False,
        )

        # 1. Calculate `to_device` events
        await self._generate_sync_entry_for_to_device(sync_result_builder)

        # 2. Calculate `device_lists`
        # Device list updates are sent if a since token is provided.
        device_lists = DeviceListUpdates()
        include_device_list_updates = bool(since_token and since_token.device_list_key)
        if include_device_list_updates:
            # Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
            # is used in calculate_user_changes below.
            #
            # TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
            # figure out the membership changes/derived info needed for
            # `_generate_sync_entry_for_device_list()`. In the future, we should try to
            # refactor this away.
            (
                newly_joined_rooms,
                newly_left_rooms,
            ) = await self._generate_sync_entry_for_rooms(sync_result_builder)

            # This uses the sync_result_builder.joined which is set in
            # `_generate_sync_entry_for_rooms`, if that didn't find any joined
            # rooms for some reason it is a no-op.
            (
                newly_joined_or_invited_or_knocked_users,
                newly_left_users,
            ) = sync_result_builder.calculate_user_changes()

            # include_device_list_updates can only be True if we have a
            # since token.
            assert since_token is not None
            device_lists = await self._device_handler.generate_sync_entry_for_device_list(
                user_id=user_id,
                since_token=since_token,
                now_token=sync_result_builder.now_token,
                joined_room_ids=sync_result_builder.joined_room_ids,
                newly_joined_rooms=newly_joined_rooms,
                newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
                newly_left_rooms=newly_left_rooms,
                newly_left_users=newly_left_users,
            )

        # 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
        device_id = sync_config.device_id
        one_time_keys_count: JsonMapping = {}
        unused_fallback_key_types: List[str] = []
        if device_id:
            # TODO: We should have a way to let clients differentiate between the states of:
            #   * no change in OTK count since the provided since token
            #   * the server has zero OTKs left for this device
            #   Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
            one_time_keys_count = await self.store.count_e2e_one_time_keys(
                user_id, device_id
            )
            unused_fallback_key_types = list(
                await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
            )

        return E2eeSyncResult(
            to_device=sync_result_builder.to_device,
            device_lists=device_lists,
            device_one_time_keys_count=one_time_keys_count,
            device_unused_fallback_key_types=unused_fallback_key_types,
            next_batch=sync_result_builder.now_token,
        )

    async def get_sync_result_builder(
        self,
        sync_config: SyncConfig,

@@ -9,7 +9,7 @@ from synapse.storage.databases.main.thread_subscriptions import (
    AutomaticSubscriptionConflicted,
    ThreadSubscription,
)
from synapse.types import EventOrderings, UserID
from synapse.types import EventOrderings, StreamKeyType, UserID

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -22,6 +22,7 @@ class ThreadSubscriptionsHandler:
        self.store = hs.get_datastores().main
        self.event_handler = hs.get_event_handler()
        self.auth = hs.get_auth()
        self._notifier = hs.get_notifier()

    async def get_thread_subscription_settings(
        self,
@@ -132,6 +133,15 @@ class ThreadSubscriptionsHandler:
                errcode=Codes.MSC4306_CONFLICTING_UNSUBSCRIPTION,
            )

        if outcome is not None:
            # wake up user streams (e.g. sliding sync) on the same worker
            self._notifier.on_new_event(
                StreamKeyType.THREAD_SUBSCRIPTIONS,
                # outcome is a stream_id
                outcome,
                users=[user_id.to_string()],
            )

        return outcome

    async def unsubscribe_user_from_thread(
@@ -162,8 +172,19 @@ class ThreadSubscriptionsHandler:
            logger.info("rejecting thread subscriptions change (thread not accessible)")
            raise NotFoundError("No such thread root")

        return await self.store.unsubscribe_user_from_thread(
        outcome = await self.store.unsubscribe_user_from_thread(
            user_id.to_string(),
            event.room_id,
            thread_root_event_id,
        )

        if outcome is not None:
            # wake up user streams (e.g. sliding sync) on the same worker
            self._notifier.on_new_event(
                StreamKeyType.THREAD_SUBSCRIPTIONS,
                # outcome is a stream_id
                outcome,
                users=[user_id.to_string()],
            )

        return outcome

@@ -164,11 +164,13 @@ def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]:
    return counts


LaterGauge(
in_flight_requests = LaterGauge(
    name="synapse_http_server_in_flight_requests_count",
    desc="",
    labelnames=["method", "servlet", SERVER_NAME_LABEL],
    caller=_get_in_flight_counts,
)
in_flight_requests.register_hook(
    homeserver_instance_id=None, hook=_get_in_flight_counts
)



@@ -702,6 +702,10 @@ class _ByteProducer:
        self._request: Optional[Request] = request
        self._iterator = iterator
        self._paused = False
        self.tracing_scope = start_active_span(
            "write_bytes_to_request",
        )
        self.tracing_scope.__enter__()

        try:
            self._request.registerProducer(self, True)
@@ -712,8 +716,8 @@ class _ByteProducer:
            logger.info("Connection disconnected before response was written: %r", e)

            # We drop our references to data we'll not use.
            self._request = None
            self._iterator = iter(())
            self.tracing_scope.__exit__(type(e), None, e.__traceback__)
        else:
            # Start producing if `registerProducer` was successful
            self.resumeProducing()
@@ -727,6 +731,9 @@ class _ByteProducer:
            self._request.write(b"".join(data))

    def pauseProducing(self) -> None:
        opentracing_span = active_span()
        if opentracing_span is not None:
            opentracing_span.log_kv({"event": "producer_paused"})
        self._paused = True

    def resumeProducing(self) -> None:
@@ -737,6 +744,10 @@ class _ByteProducer:

        self._paused = False

        opentracing_span = active_span()
        if opentracing_span is not None:
            opentracing_span.log_kv({"event": "producer_resumed"})

        # Write until there's backpressure telling us to stop.
        while not self._paused:
            # Get the next chunk and write it to the request.
@@ -771,6 +782,7 @@ class _ByteProducer:
    def stopProducing(self) -> None:
        # Clear a circular reference.
        self._request = None
        self.tracing_scope.__exit__(None, None, None)


def _encode_json_bytes(json_object: object) -> bytes:
@@ -913,8 +925,9 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
    # once (via `Request.write`) is that doing so starts the timeout for the
    # next request to be received: so if it takes longer than 60s to stream back
    # the response to the client, the client never gets it.
    # c.f https://github.com/twisted/twisted/issues/12498
    #
    # The correct solution is to use a Producer; then the timeout is only
    # One workaround is to use a `Producer`; then the timeout is only
    # started once all of the content is sent over the TCP connection.

    # To make sure we don't write all of the bytes at once we split it up into

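
Because the producer is driven by reactor callbacks, the span here cannot live in a `with` block; it is entered in `__init__` and exited on each terminal path (`registerProducer` failure and `stopProducing`). In straight-line code the equivalent pattern would look like this sketch (`do_streaming_work` is hypothetical):

```python
scope = start_active_span("write_bytes_to_request")
scope.__enter__()
try:
    do_streaming_work()
except Exception as e:
    # Mirror of the error path above: close the scope with the exception info.
    scope.__exit__(type(e), e, e.__traceback__)
    raise
else:
    # Mirror of stopProducing(): close the scope normally.
    scope.__exit__(None, None, None)
```
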
@@ -130,6 +130,16 @@ def parse_integer(
    return parse_integer_from_args(args, name, default, required, negative)


@overload
def parse_integer_from_args(
    args: Mapping[bytes, Sequence[bytes]],
    name: str,
    default: int,
    required: Literal[False] = False,
    negative: bool = False,
) -> int: ...


@overload
def parse_integer_from_args(
    args: Mapping[bytes, Sequence[bytes]],

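
The `@overload` stubs added here let the type checker pick the return type from the arguments (here, whether a non-`None` `default` is supplied). The same idiom in a standalone, hypothetical helper:

```python
from typing import Optional, overload


@overload
def lookup(name: str, default: int) -> int: ...


@overload
def lookup(name: str, default: None = None) -> Optional[int]: ...


def lookup(name: str, default: Optional[int] = None) -> Optional[int]:
    # One runtime implementation; the overloads above exist purely for the
    # type checker, which narrows the return type at each call site.
    table = {"limit": 10}
    value = table.get(name)
    return value if value is not None else default
```
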
@@ -56,7 +56,6 @@ from twisted.internet import defer, threads
from twisted.python.threadpool import ThreadPool

if TYPE_CHECKING:
    from synapse.logging.scopecontextmanager import _LogContextScope
    from synapse.types import ISynapseReactor

logger = logging.getLogger(__name__)
@@ -228,16 +227,24 @@ LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]


class _Sentinel:
    """Sentinel to represent the root context"""
    """
    Sentinel to represent the root context

    __slots__ = ["previous_context", "finished", "request", "scope", "tag"]
    This should only be used for tasks outside of Synapse like when we yield control
    back to the Twisted reactor (event loop) so we don't leak the current logging
    context to other tasks that are scheduled next in the event loop.

    Nothing from the Synapse homeserver should be logged with the sentinel context, i.e.
    we should always know which server the logs are coming from.
    """

    __slots__ = ["previous_context", "finished", "request", "tag"]

    def __init__(self) -> None:
        # Minimal set for compatibility with LoggingContext
        self.previous_context = None
        self.finished = False
        self.request = None
        self.scope = None
        self.tag = None

    def __str__(self) -> str:
@@ -290,7 +297,6 @@ class LoggingContext:
        "finished",
        "request",
        "tag",
        "scope",
    ]

    def __init__(
@@ -311,7 +317,6 @@ class LoggingContext:
        self.main_thread = get_thread_id()
        self.request = None
        self.tag = ""
        self.scope: Optional["_LogContextScope"] = None

        # keep track of whether we have hit the __exit__ block for this context
        # (suggesting that the thing that created the context thinks it should
@@ -324,9 +329,6 @@ class LoggingContext:
            # we track the current request_id
            self.request = self.parent_context.request

            # we also track the current scope:
            self.scope = self.parent_context.scope

        if request is not None:
            # the request param overrides the request from the parent context
            self.request = request
@@ -623,9 +625,17 @@ class LoggingContextFilter(logging.Filter):


class PreserveLoggingContext:
    """Context manager which replaces the logging context
    """
    Context manager which replaces the logging context

    The previous logging context is restored on exit."""
    The previous logging context is restored on exit.

    `make_deferred_yieldable` is pretty equivalent to using `with
    PreserveLoggingContext():` (using the default sentinel context), i.e. it clears the
    logcontext before awaiting (and so before execution passes back to the reactor) and
    restores the old context once the awaitable completes (execution passes from the
    reactor back to the code).
    """

    __slots__ = ["_old_context", "_new_context"]

@@ -791,6 +801,15 @@ def run_in_background(
    return from the function, and that the sentinel context is set once the
    deferred returned by the function completes.

    To explain how the log contexts work here:
    - When `run_in_background` is called, the current context is stored ("original"),
      we kick off the background task in the current context, and we restore that
      original context before returning
    - When the background task finishes, we don't want to leak our context into the
      reactor which would erroneously get attached to the next operation picked up by
      the event loop. We add a callback to the deferred which will clear the logging
      context after it finishes and yields control back to the reactor.

    Useful for wrapping functions that return a deferred or coroutine, which you don't
    yield or await on (for instance because you want to pass it to
    deferred.gatherResults()).
@@ -802,9 +821,15 @@ def run_in_background(
    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
    CRITICAL error about an unhandled error will be logged without much
    indication about where it came from.

    Returns:
        Deferred which returns the result of func, or `None` if func raises.
        Note that the returned Deferred does not follow the synapse logcontext
        rules.
    """
    current = current_context()
    calling_context = current_context()
    try:
        # (kick off the task in the current context)
        res = f(*args, **kwargs)
    except Exception:
        # the assumption here is that the caller doesn't want to be disturbed
@@ -813,6 +838,9 @@ def run_in_background(

    # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
    # value. Convert it to a `Deferred`.
    #
    # Wrapping the value in a deferred has the side effect of executing the coroutine,
    # if it is one. If it's already a deferred, then we can just use that.
    d: "defer.Deferred[R]"
    if isinstance(res, typing.Coroutine):
        # Wrap the coroutine in a `Deferred`.
@@ -827,20 +855,24 @@ def run_in_background(
        # `res` is a plain value. Wrap it in a `Deferred`.
        d = defer.succeed(res)

    # The deferred has already completed
    if d.called and not d.paused:
        # The function should have maintained the logcontext, so we can
        # optimise out the messing about
        return d

    # The function may have reset the context before returning, so
    # we need to restore it now.
    ctx = set_current_context(current)
    # The function may have reset the context before returning, so we need to restore it
    # now.
    #
    # Our goal is to have the caller logcontext unchanged after firing off the
    # background task and returning.
    set_current_context(calling_context)

    # The original context will be restored when the deferred
    # completes, but there is nothing waiting for it, so it will
    # get leaked into the reactor or some other function which
    # wasn't expecting it. We therefore need to reset the context
    # here.
    # The original logcontext will be restored when the deferred completes, but
    # there is nothing waiting for it, so it will get leaked into the reactor (which
    # would then get picked up by the next thing the reactor does). We therefore
    # need to reset the logcontext here (set the `sentinel` logcontext) before
    # yielding control back to the reactor.
    #
    # (If this feels asymmetric, consider it this way: we are
    # effectively forking a new thread of execution. We are
@@ -848,7 +880,7 @@ def run_in_background(
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
    d.addBoth(_set_context_cb, ctx)
    d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
    return d


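A minimal fire-and-forget usage sketch for the behaviour documented above (`update_cache` is a hypothetical coroutine function):

```python
def on_new_event(event):
    # Kick off the task without awaiting it. Our logcontext is unchanged when
    # this returns, and the task's context is cleared (set to the sentinel)
    # once the deferred completes, so nothing leaks into the reactor.
    run_in_background(update_cache, event)
```
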
@@ -866,20 +898,34 @@ def run_coroutine_in_background(
    coroutine directly rather than a function. We can do this because coroutines
    do not run until called, and so calling an async function without awaiting
    cannot change the log contexts.
    """

    current = current_context()
    This is an ergonomic helper so we can do this:
    ```python
    run_coroutine_in_background(func1(arg1))
    ```
    Rather than having to do this:
    ```python
    run_in_background(lambda: func1(arg1))
    ```
    """
    calling_context = current_context()

    # Wrap the coroutine in a deferred, which will have the side effect of executing the
    # coroutine in the background.
    d = defer.ensureDeferred(coroutine)

    # The function may have reset the context before returning, so
    # we need to restore it now.
    ctx = set_current_context(current)
    # The function may have reset the context before returning, so we need to restore it
    # now.
    #
    # Our goal is to have the caller logcontext unchanged after firing off the
    # background task and returning.
    set_current_context(calling_context)

    # The original context will be restored when the deferred
    # completes, but there is nothing waiting for it, so it will
    # get leaked into the reactor or some other function which
    # wasn't expecting it. We therefore need to reset the context
    # here.
    # The original logcontext will be restored when the deferred completes, but
    # there is nothing waiting for it, so it will get leaked into the reactor (which
    # would then get picked up by the next thing the reactor does). We therefore
    # need to reset the logcontext here (set the `sentinel` logcontext) before
    # yielding control back to the reactor.
    #
    # (If this feels asymmetric, consider it this way: we are
    # effectively forking a new thread of execution. We are
@@ -887,7 +933,7 @@ def run_coroutine_in_background(
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
    d.addBoth(_set_context_cb, ctx)
    d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
    return d


@@ -895,24 +941,43 @@ T = TypeVar("T")


def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
    """Given a deferred, make it follow the Synapse logcontext rules:

    If the deferred has completed, essentially does nothing (just returns another
    completed deferred with the result/failure).

    If the deferred has not yet completed, resets the logcontext before
    returning a deferred. Then, when the deferred completes, restores the
    current logcontext before running callbacks/errbacks.

    (This is more-or-less the opposite operation to run_in_background.)
    """
    Given a deferred, make it follow the Synapse logcontext rules:

    - If the deferred has completed, essentially does nothing (just returns another
      completed deferred with the result/failure).
    - If the deferred has not yet completed, resets the logcontext before returning an
      incomplete deferred. Then, when the deferred completes, restores the current
      logcontext before running callbacks/errbacks.

    This means the resultant deferred can be awaited without leaking the current
    logcontext to the reactor (which would then get erroneously picked up by the next
    thing the reactor does), and also means that the logcontext is preserved when the
    deferred completes.

    (This is more-or-less the opposite operation to run_in_background in terms of how it
    handles log contexts.)

    Pretty much equivalent to using `with PreserveLoggingContext():`, i.e. it clears the
    logcontext before awaiting (and so before execution passes back to the reactor) and
    restores the old context once the awaitable completes (execution passes from the
    reactor back to the code).
    """
    # The deferred has already completed
    if deferred.called and not deferred.paused:
        # it looks like this deferred is ready to run any callbacks we give it
        # immediately. We may as well optimise out the logcontext faffery.
        return deferred

    # ok, we can't be sure that a yield won't block, so let's reset the
    # logcontext, and add a callback to the deferred to restore it.
    # Our goal is to have the caller logcontext unchanged after they yield/await the
    # returned deferred.
    #
    # When the caller yields/awaits the returned deferred, it may yield
    # control back to the reactor. To avoid leaking the current logcontext to the
    # reactor (which would then get erroneously picked up by the next thing the reactor
    # does) while the deferred runs in the reactor event loop, we reset the logcontext
    # and add a callback to the deferred to restore it so the caller's logcontext is
    # active when the deferred completes.
    prev_context = set_current_context(SENTINEL_CONTEXT)
    deferred.addBoth(_set_context_cb, prev_context)
    return deferred

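
Taken together with `run_in_background`, the intended pairing looks like this sketch (`fetch_thing` is hypothetical):

```python
async def get_thing():
    # Start the work; the returned deferred does NOT follow logcontext rules.
    d = run_in_background(fetch_thing)

    # ... do other work in our own logcontext ...

    # Wrap before awaiting so our logcontext is cleared while we wait and
    # restored when the result arrives.
    return await make_deferred_yieldable(d)
```
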
@@ -251,18 +251,17 @@ class _DummyTagNames:
try:
    import opentracing
    import opentracing.tags
    from opentracing.scope_managers.contextvars import ContextVarsScopeManager

    tags = opentracing.tags
except ImportError:
    opentracing = None  # type: ignore[assignment]
    tags = _DummyTagNames  # type: ignore[assignment]
    ContextVarsScopeManager = None  # type: ignore
try:
    from jaeger_client import Config as JaegerConfig

    from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
    JaegerConfig = None  # type: ignore
    LogContextScopeManager = None  # type: ignore


try:
@@ -484,7 +483,7 @@ def init_tracer(hs: "HomeServer") -> None:
    config = JaegerConfig(
        config=jaeger_config,
        service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
        scope_manager=LogContextScopeManager(),
        scope_manager=ContextVarsScopeManager(),
        metrics_factory=PrometheusMetricsFactory(),
    )

@@ -1,161 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

import logging
from typing import Optional

from opentracing import Scope, ScopeManager, Span

from synapse.logging.context import (
    LoggingContext,
    current_context,
    nested_logging_context,
)

logger = logging.getLogger(__name__)


class LogContextScopeManager(ScopeManager):
    """
    The LogContextScopeManager tracks the active scope in opentracing
    by using the log contexts which are native to synapse. This is so
    that the basic opentracing api can be used across twisted defereds.

    It would be nice just to use opentracing's ContextVarsScopeManager,
    but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
    """

    def __init__(self) -> None:
        pass

    @property
    def active(self) -> Optional[Scope]:
        """
        Returns the currently active Scope which can be used to access the
        currently active Scope.span.
        If there is a non-null Scope, its wrapped Span
        becomes an implicit parent of any newly-created Span at
        Tracer.start_active_span() time.

        Return:
            The Scope that is active, or None if not available.
        """
        ctx = current_context()
        return ctx.scope

    def activate(self, span: Span, finish_on_close: bool) -> Scope:
        """
        Makes a Span active.
        Args
            span: the span that should become active.
            finish_on_close: whether Span should be automatically finished when
                Scope.close() is called.

        Returns:
            Scope to control the end of the active period for
            *span*. It is a programming error to neglect to call
            Scope.close() on the returned instance.
        """

        ctx = current_context()

        if not ctx:
            logger.error("Tried to activate scope outside of loggingcontext")
            return Scope(None, span)  # type: ignore[arg-type]

        if ctx.scope is not None:
            # start a new logging context as a child of the existing one.
            # Doing so -- rather than updating the existing logcontext -- means that
            # creating several concurrent spans under the same logcontext works
            # correctly.
            ctx = nested_logging_context("")
            enter_logcontext = True
        else:
            # if there is no span currently associated with the current logcontext, we
            # just store the scope in it.
            #
            # This feels a bit dubious, but it does hack around a problem where a
            # span outlasts its parent logcontext (which would otherwise lead to
            # "Re-starting finished log context" errors).
            enter_logcontext = False

        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
        ctx.scope = scope
        if enter_logcontext:
            ctx.__enter__()

        return scope


class _LogContextScope(Scope):
    """
    A custom opentracing scope, associated with a LogContext

    * When the scope is closed, the logcontext's active scope is reset to None.
      and - if enter_logcontext was set - the logcontext is finished too.
    """

    def __init__(
        self,
        manager: LogContextScopeManager,
        span: Span,
        logcontext: LoggingContext,
        enter_logcontext: bool,
        finish_on_close: bool,
    ):
        """
        Args:
            manager:
                the manager that is responsible for this scope.
            span:
                the opentracing span which this scope represents the local
                lifetime for.
            logcontext:
                the log context to which this scope is attached.
            enter_logcontext:
                if True the log context will be exited when the scope is finished
            finish_on_close:
                if True finish the span when the scope is closed
        """
        super().__init__(manager, span)
        self.logcontext = logcontext
        self._finish_on_close = finish_on_close
        self._enter_logcontext = enter_logcontext

    def __str__(self) -> str:
        return f"Scope<{self.span}>"

    def close(self) -> None:
        active_scope = self.manager.active
        if active_scope is not self:
            logger.error(
                "Closing scope %s which is not the currently-active one %s",
                self,
                active_scope,
            )

        if self._finish_on_close:
            self.span.finish()

        self.logcontext.scope = None

        if self._enter_logcontext:
            self.logcontext.__exit__(None, None, None)
@@ -179,11 +179,13 @@ class MediaRepository:

        # We get the media upload limits and sort them in descending order of
        # time period, so that we can apply some optimizations.
        self.media_upload_limits = hs.config.media.media_upload_limits
        self.media_upload_limits.sort(
        self.default_media_upload_limits = hs.config.media.media_upload_limits
        self.default_media_upload_limits.sort(
            key=lambda limit: limit.time_period_ms, reverse=True
        )

        self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository

    def _start_update_recently_accessed(self) -> Deferred:
        return run_as_background_process(
            "update_recently_accessed_media",
@@ -340,16 +342,27 @@ class MediaRepository:

        # Check that the user has not exceeded any of the media upload limits.

        # Use limits from module API if provided
        media_upload_limits = (
            await self.media_repository_callbacks.get_media_upload_limits_for_user(
                auth_user.to_string()
            )
        )

        # Otherwise use the default limits from config
        if media_upload_limits is None:
            # Note: the media upload limits are sorted so larger time periods are
            # first.
            media_upload_limits = self.default_media_upload_limits

        # This is the total size of media uploaded by the user in the last
        # `time_period_ms` milliseconds, or None if we haven't checked yet.
        uploaded_media_size: Optional[int] = None

        # Note: the media upload limits are sorted so larger time periods are
        # first.
        for limit in self.media_upload_limits:
        for limit in media_upload_limits:
            # We only need to check the amount of media uploaded by the user in
            # this latest (smaller) time period if the amount of media uploaded
            # in a previous (larger) time period is above the limit.
            # in a previous (larger) time period is below the limit.
            #
            # This optimization means that in the common case where the user
            # hasn't uploaded much media, we only need to query the database
@@ -363,6 +376,12 @@ class MediaRepository:
            )

            if uploaded_media_size + content_length > limit.max_bytes:
                await self.media_repository_callbacks.on_media_upload_limit_exceeded(
                    user_id=auth_user.to_string(),
                    limit=limit,
                    sent_bytes=uploaded_media_size,
                    attempted_bytes=content_length,
                )
                raise SynapseError(
                    400, "Media upload limit exceeded", Codes.RESOURCE_LIMIT_EXCEEDED
                )

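
A small worked sketch of why the descending sort pays off (numbers are made up):

```python
GiB = 1024**3
DAY_MS = 24 * 3600 * 1000

# Sorted with the larger time period first, as in the constructor above:
# 10 GiB per 30 days, then 1 GiB per day.
limits = [(30 * DAY_MS, 10 * GiB), (1 * DAY_MS, 1 * GiB)]

usage_last_30_days = 200 * 1024**2  # 200 MiB -- well under both caps

# Usage in the last day can never exceed usage in the last 30 days, so once
# the 30-day total is already below the 1-day cap there is nothing left to
# check and the second (1-day) database query can be skipped entirely.
skip_second_query = usage_last_30_days <= limits[1][1]
assert skip_second_query
```
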
@@ -43,7 +43,7 @@ from typing import (
|
||||
)
|
||||
|
||||
import attr
|
||||
from pkg_resources import parse_version
|
||||
from packaging.version import parse as parse_version
|
||||
from prometheus_client import (
|
||||
CollectorRegistry,
|
||||
Counter,
|
||||
@@ -73,8 +73,6 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
METRICS_PREFIX = "/_synapse/metrics"
|
||||
|
||||
all_gauges: Dict[str, Collector] = {}
|
||||
|
||||
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
|
||||
|
||||
SERVER_NAME_LABEL = "server_name"
|
||||
@@ -163,42 +161,110 @@ class LaterGauge(Collector):
|
||||
name: str
|
||||
desc: str
|
||||
labelnames: Optional[StrSequence] = attr.ib(hash=False)
|
||||
# callback: should either return a value (if there are no labels for this metric),
|
||||
# or dict mapping from a label tuple to a value
|
||||
caller: Callable[
|
||||
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
|
||||
]
|
||||
_instance_id_to_hook_map: Dict[
|
||||
Optional[str], # instance_id
|
||||
Callable[
|
||||
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
|
||||
],
|
||||
] = attr.ib(factory=dict, hash=False)
|
||||
"""
|
||||
Map from homeserver instance_id to a callback. Each callback should either return a
|
||||
value (if there are no labels for this metric), or dict mapping from a label tuple
|
||||
to a value.
|
||||
|
||||
We use `instance_id` instead of `server_name` because it's possible to have multiple
|
||||
workers running in the same process with the same `server_name`.
    """

    def collect(self) -> Iterable[Metric]:
        # The decision to add `SERVER_NAME_LABEL` is from the `LaterGauge` usage itself
        # (we don't enforce it here, one level up).
        g = GaugeMetricFamily(self.name, self.desc, labels=self.labelnames)  # type: ignore[missing-server-name-label]

-        try:
-            calls = self.caller()
-        except Exception:
-            logger.exception("Exception running callback for LaterGauge(%s)", self.name)
-            yield g
-            return
+        for homeserver_instance_id, hook in self._instance_id_to_hook_map.items():
+            try:
+                hook_result = hook()
+            except Exception:
+                logger.exception(
+                    "Exception running callback for LaterGauge(%s) for homeserver_instance_id=%s",
+                    self.name,
+                    homeserver_instance_id,
+                )
+                # Continue to return the rest of the metrics that aren't broken
+                continue

-        if isinstance(calls, (int, float)):
-            g.add_metric([], calls)
-        else:
-            for k, v in calls.items():
-                g.add_metric(k, v)
+            if isinstance(hook_result, (int, float)):
+                g.add_metric([], hook_result)
+            else:
+                for k, v in hook_result.items():
+                    g.add_metric(k, v)

        yield g

+    def register_hook(
+        self,
+        *,
+        homeserver_instance_id: Optional[str],
+        hook: Callable[
+            [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
+        ],
+    ) -> None:
+        """
+        Register a callback/hook that will be called to generate metric samples for
+        the gauge.
+
+        Args:
+            homeserver_instance_id: The unique ID for this Synapse process instance
+                (`hs.get_instance_id()`) that this hook is associated with. This can be
+                used later to look up all hooks associated with a given homeserver
+                instance in order to unregister them. This should only be omitted for
+                global hooks that work across all homeservers.
+            hook: A callback that should either return a value (if there are no
+                labels for this metric), or a dict mapping from a label tuple to a value.
+        """
+        # We shouldn't have multiple hooks registered for the same homeserver `instance_id`.
+        existing_hook = self._instance_id_to_hook_map.get(homeserver_instance_id)
+        assert existing_hook is None, (
+            f"LaterGauge(name={self.name}) hook already registered for homeserver_instance_id={homeserver_instance_id}. "
+            "This is likely a Synapse bug and you forgot to unregister the previous hooks for "
+            "the server (especially in tests)."
+        )
+
+        self._instance_id_to_hook_map[homeserver_instance_id] = hook
+
+    def unregister_hooks_for_homeserver_instance_id(
+        self, homeserver_instance_id: str
+    ) -> None:
+        """
+        Unregister all hooks associated with the given homeserver `instance_id`. This
+        should be called when a homeserver is shut down to avoid extra hooks sitting
+        around.
+
+        Args:
+            homeserver_instance_id: The unique ID for this Synapse process instance to
+                unregister hooks for (`hs.get_instance_id()`).
+        """
+        self._instance_id_to_hook_map.pop(homeserver_instance_id, None)

    def __attrs_post_init__(self) -> None:
        self._register()

    def _register(self) -> None:
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self

+        # We shouldn't have multiple metrics with the same name. Typically, metrics
+        # should be created globally, so you shouldn't be running into this; this will
+        # catch any stupid mistakes. The `REGISTRY.register(self)` call above will also
+        # raise an error if the metric already exists but, to make things explicit,
+        # we also check here.
+        existing_gauge = all_later_gauges_to_clean_up_on_shutdown.get(self.name)
+        assert existing_gauge is None, f"LaterGauge(name={self.name}) already exists."
+
+        # Keep track of the gauge so we can clean it up later.
+        all_later_gauges_to_clean_up_on_shutdown[self.name] = self


+all_later_gauges_to_clean_up_on_shutdown: Dict[str, LaterGauge] = {}
+"""
+Track all `LaterGauge` instances so we can remove any associated hooks during
+homeserver shutdown.
+"""

# `MetricsEntry` only makes sense when it is a `Protocol`,

@@ -250,7 +316,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
        # Protects access to _registrations
        self._lock = threading.Lock()

-        self._register_with_collector()
+        REGISTRY.register(self)

    def register(
        self,

@@ -341,14 +407,6 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
                gauge.add_metric(labels=key, value=getattr(metrics, name))
            yield gauge

-    def _register_with_collector(self) -> None:
-        if self.name in all_gauges.keys():
-            logger.warning("%s already registered, reregistering", self.name)
-            REGISTRY.unregister(all_gauges.pop(self.name))
-
-        REGISTRY.register(self)
-        all_gauges[self.name] = self


class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily):
    """

@@ -228,6 +228,11 @@ def run_as_background_process(
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).

+    Because the returned Deferred does not follow the synapse logcontext rules, awaiting
+    the result of this function will result in the log context being cleared (bad). In
+    order to properly await the result of this function and maintain the current log
+    context, use `make_deferred_yieldable`.

    Args:
        desc: a description for this background process type
        server_name: The homeserver name that this background process is being run for

@@ -280,6 +285,20 @@ def run_as_background_process(
            name=desc, **{SERVER_NAME_LABEL: server_name}
        ).dec()

+    # To explain how the log contexts work here:
+    # - When `run_as_background_process` is called, the current context is stored
+    #   (using `PreserveLoggingContext`), we kick off the background task, and we
+    #   restore the original context before returning (also part of
+    #   `PreserveLoggingContext`).
+    # - The background task runs in its own new logcontext named after `desc`.
+    # - When the background task finishes, we don't want to leak our background context
+    #   into the reactor, where it would erroneously get attached to the next operation
+    #   picked up by the event loop. We use `PreserveLoggingContext` to set the
+    #   `sentinel` context, which means the new `BackgroundProcessLoggingContext` will
+    #   remember the `sentinel` context as its previous context to return to when it
+    #   exits and yields control back to the reactor.
+    #
+    # TODO: Why can't we simplify to using `return run_in_background(run)`?
    with PreserveLoggingContext():
        # Note that we return a Deferred here so that it can be used in a
        # looping_call and other places that expect a Deferred.
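
As a worked example of the rule the docstring above spells out, the sketch below contrasts fire-and-forget use with properly awaiting the result (assuming the argument order from the Args list — `desc`, then `server_name`, then the function; `self._do_cleanup` is hypothetical):

from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process

async def trigger_cleanup(self) -> None:
    # Fire-and-forget: kick off the background process and ignore the Deferred.
    run_as_background_process("example_cleanup", self.server_name, self._do_cleanup)

async def trigger_cleanup_and_wait(self) -> None:
    # The returned Deferred does not follow logcontext rules, so wrap it in
    # make_deferred_yieldable before awaiting to keep the current log context.
    await make_deferred_yieldable(
        run_as_background_process("example_cleanup", self.server_name, self._do_cleanup)
    )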

@@ -50,6 +50,7 @@ from synapse.api.constants import ProfileFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.config import ConfigError
+from synapse.config.repository import MediaUploadLimit
from synapse.events import EventBase
from synapse.events.presence_router import (
    GET_INTERESTED_USERS_CALLBACK,

@@ -94,7 +95,9 @@ from synapse.module_api.callbacks.account_validity_callbacks import (
)
from synapse.module_api.callbacks.media_repository_callbacks import (
    GET_MEDIA_CONFIG_FOR_USER_CALLBACK,
+    GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK,
    IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK,
+    ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK,
)
from synapse.module_api.callbacks.ratelimit_callbacks import (
    GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK,

@@ -205,6 +208,7 @@ __all__ = [
    "RoomAlias",
    "UserProfile",
    "RatelimitOverride",
+    "MediaUploadLimit",
]

logger = logging.getLogger(__name__)

@@ -462,6 +466,12 @@ class ModuleApi:
        is_user_allowed_to_upload_media_of_size: Optional[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = None,
+        get_media_upload_limits_for_user: Optional[
+            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
+        ] = None,
+        on_media_upload_limit_exceeded: Optional[
+            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
+        ] = None,
    ) -> None:
        """Registers callbacks for media repository capabilities.
        Added in Synapse v1.132.0.

@@ -469,6 +479,8 @@ class ModuleApi:
        return self._callbacks.media_repository.register_callbacks(
            get_media_config_for_user=get_media_config_for_user,
            is_user_allowed_to_upload_media_of_size=is_user_allowed_to_upload_media_of_size,
+            get_media_upload_limits_for_user=get_media_upload_limits_for_user,
+            on_media_upload_limit_exceeded=on_media_upload_limit_exceeded,
        )

    def register_third_party_rules_callbacks(

@@ -15,6 +15,7 @@
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional

+from synapse.config.repository import MediaUploadLimit
from synapse.types import JsonDict
from synapse.util.async_helpers import delay_cancellation
from synapse.util.metrics import Measure

@@ -28,6 +29,14 @@ GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[Optional[JsonDict

IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitable[bool]]

+GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK = Callable[
+    [str], Awaitable[Optional[List[MediaUploadLimit]]]
+]
+
+ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK = Callable[
+    [str, MediaUploadLimit, int, int], Awaitable[None]
+]


class MediaRepositoryModuleApiCallbacks:
    def __init__(self, hs: "HomeServer") -> None:

@@ -39,6 +48,12 @@ class MediaRepositoryModuleApiCallbacks:
        self._is_user_allowed_to_upload_media_of_size_callbacks: List[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = []
+        self._get_media_upload_limits_for_user_callbacks: List[
+            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
+        ] = []
+        self._on_media_upload_limit_exceeded_callbacks: List[
+            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
+        ] = []

    def register_callbacks(
        self,

@@ -46,6 +61,12 @@ class MediaRepositoryModuleApiCallbacks:
        is_user_allowed_to_upload_media_of_size: Optional[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = None,
+        get_media_upload_limits_for_user: Optional[
+            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
+        ] = None,
+        on_media_upload_limit_exceeded: Optional[
+            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
+        ] = None,
    ) -> None:
        """Register callbacks from module for each hook."""
        if get_media_config_for_user is not None:

@@ -56,6 +77,16 @@ class MediaRepositoryModuleApiCallbacks:
                is_user_allowed_to_upload_media_of_size
            )

+        if get_media_upload_limits_for_user is not None:
+            self._get_media_upload_limits_for_user_callbacks.append(
+                get_media_upload_limits_for_user
+            )
+
+        if on_media_upload_limit_exceeded is not None:
+            self._on_media_upload_limit_exceeded_callbacks.append(
+                on_media_upload_limit_exceeded
+            )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        for callback in self._get_media_config_for_user_callbacks:
            with Measure(

@@ -83,3 +114,47 @@ class MediaRepositoryModuleApiCallbacks:
                return res

        return True

+    async def get_media_upload_limits_for_user(
+        self, user_id: str
+    ) -> Optional[List[MediaUploadLimit]]:
+        """
+        Get the first non-None list of MediaUploadLimits for the user from the registered callbacks.
+        If a list is returned it will be sorted in descending order of duration.
+        """
+        for callback in self._get_media_upload_limits_for_user_callbacks:
+            with Measure(
+                self.clock,
+                name=f"{callback.__module__}.{callback.__qualname__}",
+                server_name=self.server_name,
+            ):
+                res: Optional[List[MediaUploadLimit]] = await delay_cancellation(
+                    callback(user_id)
+                )
+                if res is not None:  # to allow [] to be returned meaning no limit
+                    # We sort them in descending order of time period
+                    res.sort(key=lambda limit: limit.time_period_ms, reverse=True)
+                    return res
+
+        return None
+
+    async def on_media_upload_limit_exceeded(
+        self,
+        user_id: str,
+        limit: MediaUploadLimit,
+        sent_bytes: int,
+        attempted_bytes: int,
+    ) -> None:
+        for callback in self._on_media_upload_limit_exceeded_callbacks:
+            with Measure(
+                self.clock,
+                name=f"{callback.__module__}.{callback.__qualname__}",
+                server_name=self.server_name,
+            ):
+                # Use a copy of the data in case the module modifies it
+                limit_copy = MediaUploadLimit(
+                    max_bytes=limit.max_bytes, time_period_ms=limit.time_period_ms
+                )
+                await delay_cancellation(
+                    callback(user_id, limit_copy, sent_bytes, attempted_bytes)
+                )
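
Putting the two new hooks together, a module would register and implement them roughly as follows (the module class, limit values, and log message are illustrative; the registration keywords and `MediaUploadLimit` fields come from the hunks above):

import logging
from typing import List, Optional

from synapse.config.repository import MediaUploadLimit
from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ExampleUploadLimitModule:
    def __init__(self, config: dict, api: ModuleApi) -> None:
        api.register_media_repository_callbacks(
            get_media_upload_limits_for_user=self.get_limits,
            on_media_upload_limit_exceeded=self.on_exceeded,
        )

    async def get_limits(self, user_id: str) -> Optional[List[MediaUploadLimit]]:
        # Returning [] means "no limit"; returning None defers to other modules.
        return [MediaUploadLimit(max_bytes=100_000_000, time_period_ms=86_400_000)]

    async def on_exceeded(
        self,
        user_id: str,
        limit: MediaUploadLimit,
        sent_bytes: int,
        attempted_bytes: int,
    ) -> None:
        logger.info(
            "%s exceeded upload limit %s (sent %d bytes, attempted %d bytes)",
            user_id,
            limit,
            sent_bytes,
            attempted_bytes,
        )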

@@ -86,6 +86,24 @@ users_woken_by_stream_counter = Counter(
    labelnames=["stream", SERVER_NAME_LABEL],
)

+notifier_listeners_gauge = LaterGauge(
+    name="synapse_notifier_listeners",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
+notifier_rooms_gauge = LaterGauge(
+    name="synapse_notifier_rooms",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
+notifier_users_gauge = LaterGauge(
+    name="synapse_notifier_users",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)

T = TypeVar("T")


@@ -281,28 +299,20 @@ class Notifier:
            )
        }

-        LaterGauge(
-            name="synapse_notifier_listeners",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=count_listeners,
+        notifier_listeners_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(), hook=count_listeners
        )

-        LaterGauge(
-            name="synapse_notifier_rooms",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
+        notifier_rooms_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {
                (self.server_name,): count(
                    bool, list(self.room_to_user_streams.values())
                )
            },
        )
-        LaterGauge(
-            name="synapse_notifier_users",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {(self.server_name,): len(self.user_to_user_stream)},
+        notifier_users_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {(self.server_name,): len(self.user_to_user_stream)},
        )

    def add_replication_callback(self, cb: Callable[[], None]) -> None:

@@ -522,6 +532,7 @@ class Notifier:
            StreamKeyType.TO_DEVICE,
            StreamKeyType.TYPING,
            StreamKeyType.UN_PARTIAL_STATED_ROOMS,
+            StreamKeyType.THREAD_SUBSCRIPTIONS,
        ],
        new_token: int,
        users: Optional[Collection[Union[str, UserID]]] = None,

@@ -49,7 +49,7 @@ from synapse.api.constants import (
from synapse.api.room_versions import PushRuleRoomFlag
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
-from synapse.events.snapshot import EventContext
+from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import SERVER_NAME_LABEL
from synapse.state import CREATE_KEY, POWER_KEY

@@ -352,7 +352,7 @@ class BulkPushRuleEvaluator:
        return related_events

    async def action_for_events_by_user(
-        self, events_and_context: List[Tuple[EventBase, EventContext]]
+        self, events_and_context: List[EventPersistencePair]
    ) -> None:
        """Given a list of events and their associated contexts, evaluate the push rules
        for each event, check if the message should increment the unread count, and

@@ -91,7 +91,7 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
    unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id)

    template_name = _priority_class_to_template_name(rule.priority_class)
-    if template_name in ["override", "underride"]:
+    if template_name in ["override", "underride", "postcontent"]:
        templaterule = {"conditions": rule.conditions, "actions": rule.actions}
    elif template_name in ["sender", "room"]:
        templaterule = {"actions": rule.actions}

@@ -19,10 +19,14 @@
#
#

+# Integer literals for push rule `kind`s.
+# This is used to store them in the database.
PRIORITY_CLASS_MAP = {
    "underride": 1,
    "sender": 2,
    "room": 3,
+    # MSC4306
+    "postcontent": 6,
    "content": 4,
    "override": 5,
}
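
The map appears to be written in evaluation-precedence order while the integer values are stable database identifiers, which would explain why `postcontent` sits between `room` and `content` in the literal yet takes the next free integer (6). Recovering the kind from a stored value is then just the inverse mapping (Synapse keeps a `PRIORITY_CLASS_INVERSE_MAP` beside this map for that purpose):

# Derived directly from PRIORITY_CLASS_MAP above.
PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()}

assert PRIORITY_CLASS_INVERSE_MAP[6] == "postcontent"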

@@ -24,8 +24,8 @@ from typing import TYPE_CHECKING, List, Tuple
from twisted.web.server import Request

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
-from synapse.events import EventBase, make_event_from_dict
-from synapse.events.snapshot import EventContext
+from synapse.events import make_event_from_dict
+from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

@@ -86,7 +86,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
    async def _serialize_payload(  # type: ignore[override]
        store: "DataStore",
        room_id: str,
-        event_and_contexts: List[Tuple[EventBase, EventContext]],
+        event_and_contexts: List[EventPersistencePair],
        backfilled: bool,
    ) -> JsonDict:
        """

@@ -25,8 +25,8 @@ from typing import TYPE_CHECKING, List, Tuple
from twisted.web.server import Request

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import EventBase, make_event_from_dict
-from synapse.events.snapshot import EventContext
+from synapse.events import make_event_from_dict
+from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID

@@ -85,7 +85,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
-        events_and_context: List[Tuple[EventBase, EventContext]],
+        events_and_context: List[EventPersistencePair],
        store: "DataStore",
        requester: Requester,
        ratelimit: bool,

@@ -44,6 +44,7 @@ from synapse.replication.tcp.streams import (
    UnPartialStatedEventStream,
    UnPartialStatedRoomStream,
)
+from synapse.replication.tcp.streams._base import ThreadSubscriptionsStream
from synapse.replication.tcp.streams.events import (
    EventsStream,
    EventsStreamEventRow,

@@ -255,6 +256,12 @@ class ReplicationDataHandler:
                self._state_storage_controller.notify_event_un_partial_stated(
                    row.event_id
                )
+        elif stream_name == ThreadSubscriptionsStream.NAME:
+            self.notifier.on_new_event(
+                StreamKeyType.THREAD_SUBSCRIPTIONS,
+                token,
+                users=[row.user_id for row in rows],
+            )

        await self._presence_handler.process_replication_rows(
            stream_name, instance_name, token, rows

@@ -106,6 +106,18 @@ user_ip_cache_counter = Counter(
    "synapse_replication_tcp_resource_user_ip_cache", "", labelnames=[SERVER_NAME_LABEL]
)

+tcp_resource_total_connections_gauge = LaterGauge(
+    name="synapse_replication_tcp_resource_total_connections",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)
+
+tcp_command_queue_gauge = LaterGauge(
+    name="synapse_replication_tcp_command_queue",
+    desc="Number of inbound RDATA/POSITION commands queued for processing",
+    labelnames=["stream_name", SERVER_NAME_LABEL],
+)


# the type of the entries in _command_queues_by_stream
_StreamCommandQueue = Deque[

@@ -243,11 +255,9 @@ class ReplicationCommandHandler:
        # outgoing replication commands to.)
        self._connections: List[IReplicationConnection] = []

-        LaterGauge(
-            name="synapse_replication_tcp_resource_total_connections",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {(self.server_name,): len(self._connections)},
+        tcp_resource_total_connections_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {(self.server_name,): len(self._connections)},
        )

        # When POSITION or RDATA commands arrive, we stick them in a queue and process

@@ -266,11 +276,9 @@ class ReplicationCommandHandler:
        # from that connection.
        self._streams_by_connection: Dict[IReplicationConnection, Set[str]] = {}

-        LaterGauge(
-            name="synapse_replication_tcp_command_queue",
-            desc="Number of inbound RDATA/POSITION commands queued for processing",
-            labelnames=["stream_name", SERVER_NAME_LABEL],
-            caller=lambda: {
+        tcp_command_queue_gauge.register_hook(
+            homeserver_instance_id=hs.get_instance_id(),
+            hook=lambda: {
                (stream_name, self.server_name): len(queue)
                for stream_name, queue in self._command_queues_by_stream.items()
            },
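
Every hook registered above is keyed by `hs.get_instance_id()`, so the teardown side (not shown in this diff) amounts to walking the tracking dict introduced earlier, roughly:

# Sketch of the cleanup path implied by `all_later_gauges_to_clean_up_on_shutdown`:
# drop every hook for the instance being shut down so the Prometheus collector
# stops calling into a dead homeserver (especially important in tests).
def cleanup_later_gauges(hs: "HomeServer") -> None:
    for gauge in all_later_gauges_to_clean_up_on_shutdown.values():
        gauge.unregister_hooks_for_homeserver_instance_id(hs.get_instance_id())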
Some files were not shown because too many files have changed in this diff.