Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: anoa/valid ... quenting/l (1 commit, 724a48ecdd)
12 .github/workflows/docker.yml (vendored)
@@ -31,7 +31,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Extract version from pyproject.toml
|
||||
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
|
||||
@@ -41,13 +41,13 @@ jobs:
|
||||
echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
|
||||
|
||||
- name: Log in to DockerHub
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Log in to GHCR
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
@@ -102,14 +102,14 @@ jobs:
|
||||
merge-multiple: true
|
||||
|
||||
- name: Log in to DockerHub
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
if: ${{ startsWith(matrix.repository, 'docker.io') }}
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Log in to GHCR
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
if: ${{ startsWith(matrix.repository, 'ghcr.io') }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
|
||||
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
|
||||
|
||||
- name: Calculate docker image tag
|
||||
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
|
||||
|
||||
6 .github/workflows/docs-pr.yaml (vendored)
@@ -13,7 +13,7 @@ jobs:
|
||||
name: GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
# Fetch all history so that the schema_versions script works.
|
||||
fetch-depth: 0
|
||||
@@ -24,7 +24,7 @@ jobs:
|
||||
mdbook-version: '0.4.17'
|
||||
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
name: Check links in documentation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Setup mdbook
|
||||
uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
|
||||
|
||||
4 .github/workflows/docs.yaml (vendored)
@@ -50,7 +50,7 @@ jobs:
|
||||
needs:
|
||||
- pre
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
# Fetch all history so that the schema_versions script works.
|
||||
fetch-depth: 0
|
||||
@@ -64,7 +64,7 @@ jobs:
|
||||
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
|
||||
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
|
||||
4 .github/workflows/fix_lint.yaml (vendored)
@@ -18,10 +18,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
components: clippy, rustfmt
|
||||
|
||||
20 .github/workflows/latest_deps.yml (vendored)
@@ -42,9 +42,9 @@ jobs:
|
||||
if: needs.check_repo.outputs.should_run_workflow == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -77,10 +77,10 @@ jobs:
|
||||
postgres-version: "14"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -93,7 +93,7 @@ jobs:
|
||||
-e POSTGRES_PASSWORD=postgres \
|
||||
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
|
||||
postgres:${{ matrix.postgres-version }}
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: pip install .[all,test]
|
||||
@@ -152,10 +152,10 @@ jobs:
|
||||
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -202,14 +202,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Check out synapse codebase
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
path: synapse
|
||||
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
cache-dependency-path: complement/go.sum
|
||||
go-version-file: complement/go.mod
|
||||
@@ -234,7 +234,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
4 .github/workflows/poetry_lockfile.yaml (vendored)
@@ -16,8 +16,8 @@ jobs:
|
||||
name: "Check locked dependencies have sdists"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: '3.x'
|
||||
- run: pip install tomli
|
||||
|
||||
8 .github/workflows/push_complement_image.yml (vendored)
@@ -33,22 +33,22 @@ jobs:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Checkout specific branch (debug build)
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
with:
|
||||
ref: ${{ inputs.branch }}
|
||||
- name: Checkout clean copy of develop (scheduled build)
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
if: github.event_name == 'schedule'
|
||||
with:
|
||||
ref: develop
|
||||
- name: Checkout clean copy of master (on-push)
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
if: github.event_name == 'push'
|
||||
with:
|
||||
ref: master
|
||||
- name: Login to registry
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
16 .github/workflows/release-artifacts.yml (vendored)
@@ -27,8 +27,8 @@ jobs:
|
||||
name: "Calculate list of debian distros"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- id: set-distros
|
||||
@@ -55,7 +55,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
path: src
|
||||
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Set up python
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
@@ -132,9 +132,9 @@ jobs:
|
||||
os: "ubuntu-24.04-arm"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
|
||||
# here, because `python` on osx points to Python 2.7.
|
||||
@@ -165,8 +165,8 @@ jobs:
|
||||
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
|
||||
8 .github/workflows/schema.yaml (vendored)
@@ -14,8 +14,8 @@ jobs:
|
||||
name: Ensure Synapse config schema is valid
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- name: Install check-jsonschema
|
||||
@@ -40,8 +40,8 @@ jobs:
|
||||
name: Ensure generated documentation is up-to-date
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- name: Install PyYAML
|
||||
|
||||
86 .github/workflows/tests.yml (vendored)
@@ -86,9 +86,9 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.linting == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -106,8 +106,8 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.linting == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
|
||||
@@ -116,8 +116,8 @@ jobs:
|
||||
check-lockfile:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: .ci/scripts/check_lockfile.py
|
||||
@@ -129,7 +129,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Setup Poetry
|
||||
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
|
||||
@@ -151,10 +151,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -187,7 +187,7 @@ jobs:
|
||||
lint-crlf:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Check line endings
|
||||
run: scripts-dev/check_line_terminators.sh
|
||||
|
||||
@@ -195,11 +195,11 @@ jobs:
|
||||
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install 'towncrier>=18.6.0rc1'"
|
||||
@@ -213,11 +213,11 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.linting == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -233,10 +233,10 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
components: clippy
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
@@ -252,10 +252,10 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: nightly-2025-04-23
|
||||
components: clippy
|
||||
@@ -270,10 +270,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -306,10 +306,10 @@ jobs:
|
||||
if: ${{ needs.changes.outputs.rust == 'true' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
# We use nightly so that we can use some unstable options that we use in
|
||||
# `.rustfmt.toml`.
|
||||
@@ -326,8 +326,8 @@ jobs:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- run: "pip install rstcheck"
|
||||
@@ -376,8 +376,8 @@ jobs:
|
||||
needs: linting-done
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: "3.x"
|
||||
- id: get-matrix
|
||||
@@ -397,7 +397,7 @@ jobs:
|
||||
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
|
||||
if: ${{ matrix.job.postgres-version }}
|
||||
@@ -412,7 +412,7 @@ jobs:
|
||||
postgres:${{ matrix.job.postgres-version }}
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -453,10 +453,10 @@ jobs:
|
||||
- changes
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -468,7 +468,7 @@ jobs:
|
||||
sudo apt-get -qq install build-essential libffi-dev python3-dev \
|
||||
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
|
||||
|
||||
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: '3.9'
|
||||
|
||||
@@ -518,7 +518,7 @@ jobs:
|
||||
extras: ["all"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
# Install libs necessary for PyPy to build binary wheels for dependencies
|
||||
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
|
||||
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
|
||||
@@ -568,12 +568,12 @@ jobs:
|
||||
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Prepare test blacklist
|
||||
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -615,7 +615,7 @@ jobs:
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- run: sudo apt-get -qq install xmlsec1 postgresql-client
|
||||
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
|
||||
with:
|
||||
@@ -659,7 +659,7 @@ jobs:
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- name: Add PostgreSQL apt repository
|
||||
# We need a version of pg_dump that can handle the version of
|
||||
# PostgreSQL being tested against. The Ubuntu package repository lags
|
||||
@@ -714,12 +714,12 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout synapse codebase
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
path: synapse
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -727,7 +727,7 @@ jobs:
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
cache-dependency-path: complement/go.sum
|
||||
go-version-file: complement/go.mod
|
||||
@@ -750,10 +750,10 @@ jobs:
|
||||
- changes
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -770,10 +770,10 @@ jobs:
|
||||
- changes
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: nightly-2022-12-01
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
|
||||
2 .github/workflows/triage_labelled.yml (vendored)
@@ -11,7 +11,7 @@ jobs:
|
||||
if: >
|
||||
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
|
||||
steps:
|
||||
- uses: actions/add-to-project@4515659e2b458b27365e167605ac44f219494b66 # v1.0.2
|
||||
- uses: actions/add-to-project@c0c5949b017d0d4a39f7ba888255881bdac2a823 # v1.0.2
|
||||
id: add_project
|
||||
with:
|
||||
project-url: "https://github.com/orgs/matrix-org/projects/67"
|
||||
|
||||
18 .github/workflows/twisted_trunk.yml (vendored)
@@ -43,10 +43,10 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -70,11 +70,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -117,10 +117,10 @@ jobs:
|
||||
- ${{ github.workspace }}:/src
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
|
||||
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
@@ -175,14 +175,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Run actions/checkout@v4 for synapse
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
with:
|
||||
path: synapse
|
||||
|
||||
- name: Prepare Complement's Prerequisites
|
||||
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
|
||||
|
||||
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
cache-dependency-path: complement/go.sum
|
||||
go-version-file: complement/go.mod
|
||||
@@ -217,7 +217,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
||||
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
92 CHANGES.md
@@ -1,95 +1,3 @@
|
||||
# Synapse 1.138.0 (2025-09-09)
|
||||
|
||||
No significant changes since 1.138.0rc1.
|
||||
|
||||
|
||||
|
||||
|
||||
# Synapse 1.138.0rc1 (2025-09-02)
|
||||
|
||||
### Features
|
||||
|
||||
- Support for the stable endpoint and scopes of [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) & co. ([\#18549](https://github.com/element-hq/synapse/issues/18549))
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- Improve database performance of [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) - Redact on Kick/Ban. ([\#18851](https://github.com/element-hq/synapse/issues/18851))
|
||||
- Do not throw an error when fetching a rejected delayed state event on startup. ([\#18858](https://github.com/element-hq/synapse/issues/18858))
|
||||
|
||||
### Improved Documentation
|
||||
|
||||
- Fix worker documentation incorrectly indicating all room Admin API requests were capable of being handled by workers. ([\#18853](https://github.com/element-hq/synapse/issues/18853))
|
||||
|
||||
### Internal Changes
|
||||
|
||||
- Instrument `_ByteProducer` with tracing to measure potential dead time while writing bytes to the request. ([\#18804](https://github.com/element-hq/synapse/issues/18804))
|
||||
- Switch to OpenTracing's `ContextVarsScopeManager` instead of our own custom `LogContextScopeManager`. ([\#18849](https://github.com/element-hq/synapse/issues/18849))
|
||||
- Trace how much work is being done while "recursively fetching redactions". ([\#18854](https://github.com/element-hq/synapse/issues/18854))
|
||||
- Link [upstream Twisted bug](https://github.com/twisted/twisted/issues/12498) tracking the problem that explains why we have to use a `Producer` to write bytes to the request. ([\#18855](https://github.com/element-hq/synapse/issues/18855))
|
||||
- Introduce `EventPersistencePair` type. ([\#18857](https://github.com/element-hq/synapse/issues/18857))
|
||||
|
||||
|
||||
|
||||
### Updates to locked dependencies
|
||||
|
||||
* Bump actions/add-to-project from c0c5949b017d0d4a39f7ba888255881bdac2a823 to 4515659e2b458b27365e167605ac44f219494b66. ([\#18863](https://github.com/element-hq/synapse/issues/18863))
|
||||
* Bump actions/checkout from 4.3.0 to 5.0.0. ([\#18834](https://github.com/element-hq/synapse/issues/18834))
|
||||
* Bump anyhow from 1.0.98 to 1.0.99. ([\#18841](https://github.com/element-hq/synapse/issues/18841))
|
||||
* Bump docker/login-action from 3.4.0 to 3.5.0. ([\#18835](https://github.com/element-hq/synapse/issues/18835))
|
||||
* Bump dtolnay/rust-toolchain from b3b07ba8b418998c39fb20f53e8b695cdcc8de1b to e97e2d8cc328f1b50210efc529dca0028893a2d9. ([\#18862](https://github.com/element-hq/synapse/issues/18862))
|
||||
* Bump phonenumbers from 9.0.11 to 9.0.12. ([\#18837](https://github.com/element-hq/synapse/issues/18837))
|
||||
* Bump regex from 1.11.1 to 1.11.2. ([\#18864](https://github.com/element-hq/synapse/issues/18864))
|
||||
* Bump reqwest from 0.12.22 to 0.12.23. ([\#18842](https://github.com/element-hq/synapse/issues/18842))
|
||||
* Bump ruff from 0.12.7 to 0.12.10. ([\#18865](https://github.com/element-hq/synapse/issues/18865))
|
||||
* Bump serde_json from 1.0.142 to 1.0.143. ([\#18866](https://github.com/element-hq/synapse/issues/18866))
|
||||
* Bump types-bleach from 6.2.0.20250514 to 6.2.0.20250809. ([\#18838](https://github.com/element-hq/synapse/issues/18838))
|
||||
* Bump types-jsonschema from 4.25.0.20250720 to 4.25.1.20250822. ([\#18867](https://github.com/element-hq/synapse/issues/18867))
|
||||
* Bump types-psycopg2 from 2.9.21.20250718 to 2.9.21.20250809. ([\#18836](https://github.com/element-hq/synapse/issues/18836))
|
||||
|
||||
# Synapse 1.137.0 (2025-08-26)
|
||||
|
||||
No significant changes since 1.137.0rc1.
|
||||
|
||||
|
||||
|
||||
|
||||
# Synapse 1.137.0rc1 (2025-08-19)
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- Fix a bug which could corrupt auth chains making it impossible to perform state resolution. ([\#18746](https://github.com/element-hq/synapse/issues/18746))
|
||||
- Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`. ([\#18780](https://github.com/element-hq/synapse/issues/18780))
|
||||
- Allow enabling [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) when the stable Matrix Authentication Service integration is enabled. ([\#18832](https://github.com/element-hq/synapse/issues/18832))
|
||||
|
||||
### Improved Documentation
|
||||
|
||||
- Include IPv6 networks in `denied-peer-ips` of coturn setup. Contributed by @litetex. ([\#18781](https://github.com/element-hq/synapse/issues/18781))
|
||||
|
||||
### Internal Changes
|
||||
|
||||
- Update tests to ensure all database tables are emptied when purging a room. ([\#18794](https://github.com/element-hq/synapse/issues/18794))
|
||||
- Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger. ([\#18815](https://github.com/element-hq/synapse/issues/18815))
|
||||
- Tag Sliding Sync traces when we `wait_for_events`. ([\#18816](https://github.com/element-hq/synapse/issues/18816))
|
||||
- Fix `portdb` CI by hardcoding the new `pg_dump` restrict key that was added due to [CVE-2025-8714](https://nvd.nist.gov/vuln/detail/cve-2025-8714). ([\#18824](https://github.com/element-hq/synapse/issues/18824))
|
||||
|
||||
|
||||
|
||||
### Updates to locked dependencies
|
||||
|
||||
* Bump actions/add-to-project from 5b1a254a3546aef88e0a7724a77a623fa2e47c36 to 0c37450c4be3b6a7582b2fb013c9ebfd9c8e9300. ([\#18557](https://github.com/element-hq/synapse/issues/18557))
|
||||
* Bump actions/cache from 4.2.3 to 4.2.4. ([\#18799](https://github.com/element-hq/synapse/issues/18799))
|
||||
* Bump actions/checkout from 4.2.2 to 4.3.0. ([\#18800](https://github.com/element-hq/synapse/issues/18800))
|
||||
* Bump actions/download-artifact from 4.3.0 to 5.0.0. ([\#18801](https://github.com/element-hq/synapse/issues/18801))
|
||||
* Bump docker/metadata-action from 5.7.0 to 5.8.0. ([\#18773](https://github.com/element-hq/synapse/issues/18773))
|
||||
* Bump mypy from 1.16.1 to 1.17.1. ([\#18775](https://github.com/element-hq/synapse/issues/18775))
|
||||
* Bump phonenumbers from 9.0.10 to 9.0.11. ([\#18797](https://github.com/element-hq/synapse/issues/18797))
|
||||
* Bump pygithub from 2.6.1 to 2.7.0. ([\#18779](https://github.com/element-hq/synapse/issues/18779))
|
||||
* Bump serde_json from 1.0.141 to 1.0.142. ([\#18776](https://github.com/element-hq/synapse/issues/18776))
|
||||
* Bump slab from 0.4.10 to 0.4.11. ([\#18809](https://github.com/element-hq/synapse/issues/18809))
|
||||
* Bump tokio from 1.47.0 to 1.47.1. ([\#18774](https://github.com/element-hq/synapse/issues/18774))
|
||||
* Bump types-pyyaml from 6.0.12.20250516 to 6.0.12.20250809. ([\#18798](https://github.com/element-hq/synapse/issues/18798))
|
||||
* Bump types-setuptools from 80.9.0.20250529 to 80.9.0.20250809. ([\#18796](https://github.com/element-hq/synapse/issues/18796))
|
||||
|
||||
# Synapse 1.136.0 (2025-08-12)
|
||||
|
||||
Note: This release includes the security fixes from `1.135.2` and `1.136.0rc2`, detailed below.
|
||||
|
||||
39 Cargo.lock (generated)
@@ -28,9 +28,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.99"
|
||||
version = "1.0.98"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
|
||||
checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
@@ -753,9 +753,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.28"
|
||||
version = "0.4.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
|
||||
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "lru-slab"
|
||||
@@ -1062,9 +1062,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.11.2"
|
||||
version = "1.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
|
||||
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
@@ -1091,9 +1091,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.23"
|
||||
version = "0.12.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
|
||||
checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
@@ -1250,28 +1250,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.224"
|
||||
version = "1.0.219"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6aaeb1e94f53b16384af593c71e20b095e958dab1d26939c1b70645c5cfbcc0b"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_core"
|
||||
version = "1.0.224"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32f39390fa6346e24defbcdd3d9544ba8a19985d0af74df8501fbfe9a64341ab"
|
||||
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.224"
|
||||
version = "1.0.219"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "87ff78ab5e8561c9a675bfc1785cb07ae721f0ee53329a595cefd8c04c2ac4e0"
|
||||
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -1280,15 +1270,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.145"
|
||||
version = "1.0.142"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
|
||||
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
"ryu",
|
||||
"serde",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -1 +0,0 @@
Extend validation of uploaded device keys.
@@ -1 +0,0 @@
Remove obsolete and experimental `/sync/e2ee` endpoint.
@@ -1 +0,0 @@
Ensure all PDUs sent via `/send` pass canonical JSON checks.
@@ -1 +0,0 @@
Add experimental support for [MSC4308: Thread Subscriptions extension to Sliding Sync](https://github.com/matrix-org/matrix-spec-proposals/pull/4308) when [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-spec-proposals/pull/4306) and [MSC4186: Simplified Sliding Sync](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) are enabled.
1 changelog.d/18746.bugfix (Normal file)
@@ -0,0 +1 @@
Fix a bug which could corrupt auth chains making it impossible to perform state resolution.
1 changelog.d/18780.bugfix (Normal file)
@@ -0,0 +1 @@
Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`.
1 changelog.d/18781.doc (Normal file)
@@ -0,0 +1 @@
Include IPv6 networks in denied-peer-ips of coturn setup. Contributed by @litetex.
@@ -1 +0,0 @@
Fix `LaterGauge` metrics to collect from all servers.
1 changelog.d/18794.misc (Normal file)
@@ -0,0 +1 @@
Update tests to ensure all database tables are emptied when purging a room.
1 changelog.d/18815.misc (Normal file)
@@ -0,0 +1 @@
Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger.
1 changelog.d/18816.misc (Normal file)
@@ -0,0 +1 @@
Tag Sliding Sync traces when we `wait_for_events`.
@@ -1 +0,0 @@
Configure Synapse to run MSC4306: Thread Subscriptions Complement tests.
@@ -1 +0,0 @@
Fix bug where we did not send invite revocations over federation.
1 changelog.d/18824.misc (Normal file)
@@ -0,0 +1 @@
Fix portdb CI by hardcoding the new pg_dump restrict key that was added due to CVE-2025-8714.
1 changelog.d/18832.bugfix (Normal file)
@@ -0,0 +1 @@
Allow enabling MSC4108 when the stable Matrix Authentication Service integration is enabled.
@@ -1 +0,0 @@
Update push rules for experimental [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-doc/issues/4306) to follow newer draft.
@@ -1 +0,0 @@
Add `get_media_upload_limits_for_user` and `on_media_upload_limit_exceeded` module API callbacks for media repository.
@@ -1 +0,0 @@
Clarify Python dependency constraints in our deprecation policy.
@@ -1 +0,0 @@
Remove `sentinel` logcontext usage where we log in `setup`, `start` and exit.
@@ -1 +0,0 @@
Use the `Enum`'s value for the dictionary key when responding to an admin request for experimental features.
@@ -1 +0,0 @@
Fix prefixed support for MSC4133.
@@ -1 +0,0 @@
Suppress "Applying schema" log noise bulk when `SYNAPSE_LOG_TESTING` is set.
@@ -1 +0,0 @@
Start background tasks after we fork the process (daemonize).
@@ -1 +0,0 @@
Add an in-memory cache to `_get_e2e_cross_signing_signatures_for_devices` to reduce DB load.
@@ -1 +0,0 @@
Better explain how we manage the logcontext in `run_in_background(...)` and `run_as_background_process(...)`.
@@ -1 +0,0 @@
Better explain how we manage the logcontext in `run_in_background(...)` and `run_as_background_process(...)`.
@@ -1 +0,0 @@
Fix open redirect in legacy SSO flow with the `idp` query parameter.
@@ -1 +0,0 @@
Replace usages of the deprecated `pkg_resources` interface in preparation of setuptools dropping it soon.
@@ -1,2 +0,0 @@
Clarify necessary `jwt_config` parameter in OIDC documentation for authentik.
Contributed by @maxkratz.
24 debian/changelog (vendored)
@@ -1,27 +1,3 @@
matrix-synapse-py3 (1.138.0) stable; urgency=medium

  * New Synapse release 1.138.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Sep 2025 11:21:25 +0100

matrix-synapse-py3 (1.138.0~rc1) stable; urgency=medium

  * New synapse release 1.138.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Sep 2025 12:16:14 +0000

matrix-synapse-py3 (1.137.0) stable; urgency=medium

  * New Synapse release 1.137.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Aug 2025 10:23:41 +0100

matrix-synapse-py3 (1.137.0~rc1) stable; urgency=medium

  * New Synapse release 1.137.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Aug 2025 10:55:22 +0100

matrix-synapse-py3 (1.136.0) stable; urgency=medium

  * New Synapse release 1.136.0.

@@ -133,8 +133,6 @@ experimental_features:
  msc3984_appservice_key_query: true
  # Invite filtering
  msc4155_enabled: true
  # Thread Subscriptions
  msc4306_enabled: true

server_notices:
  system_mxid_localpart: _server

@@ -77,13 +77,6 @@ loggers:
  #}
  synapse.visibility.filtered_event_debug:
    level: DEBUG

  {#
    If Synapse is under test, we don't care about seeing the "Applying schema" log
    lines at the INFO level every time we run the tests (it's 100 lines of bulk)
  #}
  synapse.storage.prepare_database:
    level: WARN
{% endif %}

root:
@@ -1,11 +1,13 @@
# Deprecation Policy
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of **platform dependencies** (Python, Rust, PostgreSQL, and SQLite)
and **application dependencies** (Python and Rust packages). This document outlines the
policy towards which versions we support, and when we drop support for versions in the
future.
Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.

## Platform Dependencies

Policy
------

Synapse follows the upstream support life cycles for Python and PostgreSQL,
i.e. when a version reaches End of Life Synapse will withdraw support for that
@@ -24,8 +26,8 @@ The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

### Context
Context
-------

It is important for system admins to have a clear understanding of the platform
requirements of Synapse and its deprecation policies so that they can
@@ -48,42 +50,4 @@ the ecosystem.
On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.

## Application dependencies

For application-level Python dependencies, we often specify loose version constraints
(ex. `>=X.Y.Z`) to be forwards compatible with any new versions. Upper bounds (`<A.B.C`)
are only added when necessary to prevent known incompatibilities.

When selecting a minimum version, while we are mindful of the impact on downstream
package maintainers, our primary focus is on the maintainability and progress of Synapse
itself.

For developers, a Python dependency version can be considered a "no-brainer" upgrade once it is
available in both the latest [Debian Stable](https://packages.debian.org/stable/) and
[Ubuntu LTS](https://launchpad.net/ubuntu) repositories. No need to burden yourself with
extra scrutiny or consideration at this point.

We aggressively update Rust dependencies. Since these are statically linked and managed
entirely by `cargo` during build, they *can* pose no ongoing maintenance burden on others.
This allows us to freely upgrade to leverage the latest ecosystem advancements assuming
they don't have their own system-level dependencies.

### Context

Because Python dependencies can easily be managed in a virtual environment, we are less
concerned about the criteria for selecting minimum versions. The only thing of concern
is making sure we're not making it unnecessarily difficult for downstream package
maintainers. Generally, this just means avoiding the bleeding edge for a few months.

The situation for Rust dependencies is fundamentally different. For packagers, the
concerns around Python dependency versions do not apply. The `cargo` tool handles
downloading and building all libraries to satisfy dependencies, and these libraries are
statically linked into the final binary. This means that from a packager's perspective,
the Rust dependency versions are an internal build detail, not a runtime dependency to
be managed on the target system. Consequently, we have even greater flexibility to
upgrade Rust dependencies as needed for the project. Some distros (e.g. Fedora) do
package Rust libraries, but this appears to be the outlier rather than the norm.
and is consistent with the `.deb` packages released by Matrix.org.
@@ -59,28 +59,6 @@ def do_request_handling():
    logger.debug("phew")
```

### The `sentinel` context

The default logcontext is `synapse.logging.context.SENTINEL_CONTEXT`, which is an empty
sentinel value to represent the root logcontext. This is what is used when there is no
other logcontext set. The phrase "clear/reset the logcontext" means to set the current
logcontext to the `sentinel` logcontext.

No CPU/database usage metrics are recorded against the `sentinel` logcontext.

Ideally, nothing from the Synapse homeserver would be logged against the `sentinel`
logcontext as we want to know which server the logs came from. In practice, this is not
always the case yet especially outside of request handling.

Global things outside of Synapse (e.g. Twisted reactor code) should run in the
`sentinel` logcontext. It's only when it calls into application code that a logcontext
gets activated. This means the reactor should be started in the `sentinel` logcontext,
and any time an awaitable yields control back to the reactor, it should reset the
logcontext to be the `sentinel` logcontext. This is important to avoid leaking the
current logcontext to the reactor (which would then get picked up and associated with
the next thing the reactor does).

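To make the reset behaviour concrete, here is a minimal sketch using Synapse's
`synapse.logging.context` helpers. It is an illustration under the assumption that
`LoggingContext` and `PreserveLoggingContext` are used as shown; the placeholder
functions are hypothetical.

```python
from synapse.logging.context import LoggingContext, PreserveLoggingContext


def do_work() -> None:
    ...  # placeholder: request handling attributed to the active logcontext


def call_global_code() -> None:
    ...  # placeholder: reactor-level code that should not be attributed to a request


# Run some work under a named logcontext, then reset to the sentinel before
# handing control to global code, so the request's context does not leak.
with LoggingContext("request-1234"):
    do_work()
    with PreserveLoggingContext():
        # the sentinel logcontext is active inside this block
        call_global_code()
```
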
## Using logcontexts with awaitables

Awaitables break the linear flow of code so that there is no longer a single entry point
@@ -64,68 +64,3 @@ If multiple modules implement this callback, they will be considered in order. I
|
||||
returns `True`, Synapse falls through to the next one. The value of the first callback that
|
||||
returns `False` will be used. If this happens, Synapse will not call any of the subsequent
|
||||
implementations of this callback.
|
||||
|
||||
### `get_media_upload_limits_for_user`
|
||||
|
||||
_First introduced in Synapse v1.139.0_
|
||||
|
||||
```python
|
||||
async def get_media_upload_limits_for_user(user_id: str, size: int) -> Optional[List[synapse.module_api.MediaUploadLimit]]
|
||||
```
|
||||
|
||||
**<span style="color:red">
|
||||
Caution: This callback is currently experimental. The method signature or behaviour
|
||||
may change without notice.
|
||||
</span>**
|
||||
|
||||
Called when processing a request to store content in the media repository. This can be used to dynamically override
|
||||
the [media upload limits configuration](../usage/configuration/config_documentation.html#media_upload_limits).
|
||||
|
||||
The arguments passed to this callback are:
|
||||
|
||||
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
|
||||
|
||||
If the callback returns a list then it will be used as the limits instead of those in the configuration (if any).
|
||||
|
||||
If an empty list is returned then no limits are applied (**warning:** users will be able
|
||||
to upload as much data as they desire).
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `None`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `None` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
If there are no registered modules, or if all modules return `None`, then
|
||||
the default
|
||||
[media upload limits configuration](../usage/configuration/config_documentation.html#media_upload_limits)
|
||||
will be used.
|
||||
|
||||
### `on_media_upload_limit_exceeded`

_First introduced in Synapse v1.139.0_

```python
async def on_media_upload_limit_exceeded(user_id: str, limit: synapse.module_api.MediaUploadLimit, sent_bytes: int, attempted_bytes: int) -> None
```

**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**

Called when a user attempts to upload media that would exceed a
[configured media upload limit](../usage/configuration/config_documentation.html#media_upload_limits).

This callback will only be called on workers which handle
[POST /_matrix/media/v3/upload](https://spec.matrix.org/v1.15/client-server-api/#post_matrixmediav3upload)
requests.

This could be used to inform the user that they have reached a media upload limit through
some external method.

The arguments passed to this callback are:

* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
* `limit`: The `synapse.module_api.MediaUploadLimit` representing the limit that was reached.
* `sent_bytes`: The number of bytes already sent during the period of the limit.
* `attempted_bytes`: The number of bytes that the user attempted to send.

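As a minimal sketch (not a definitive implementation), a module might simply record the
event and leave the actual notification to an external system:

```python
import logging

from synapse.module_api import MediaUploadLimit

logger = logging.getLogger(__name__)


async def on_media_upload_limit_exceeded(
    user_id: str, limit: MediaUploadLimit, sent_bytes: int, attempted_bytes: int
) -> None:
    # Illustrative only: a real module might call out to a notification service here.
    logger.warning(
        "%s hit a media upload limit: %d bytes already sent, %d bytes attempted",
        user_id,
        sent_bytes,
        attempted_bytes,
    )
```
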
@@ -186,7 +186,6 @@ oidc_providers:
4. Note the slug of your application, Client ID and Client Secret.

Note: RSA keys must be used for signing for Authentik; ECC keys do not work.
Note: The provider must have a signing key set and must not use an encryption key.

Synapse config:
```yaml
@@ -205,12 +204,6 @@ oidc_providers:
    config:
      localpart_template: "{{ user.preferred_username }}"
      display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
[...]
jwt_config:
  enabled: true
  secret: "your client secret" # TO BE FILLED (same as `client_secret` above)
  algorithm: "RS256"
# (...other fields)
```

### Dex

@@ -35,7 +35,7 @@ handlers:
loggers:
  synapse:
    level: INFO
    handlers: [file]
    handlers: [remote]
  synapse.storage.SQL:
    level: WARNING
```

@@ -2168,12 +2168,9 @@ max_upload_size: 60M
### `media_upload_limits`

*(array)* A list of media upload limits defining how much data a given user can upload in a given time period.
These limits are applied in addition to the `max_upload_size` limit above (which applies to individual uploads).

An empty list means no limits are applied.

These settings can be overridden using the `get_media_upload_limits_for_user` module API [callback](../../modules/media_repository_callbacks.md#get_media_upload_limits_for_user).

Defaults to `[]`.

Example configuration:

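As a sketch of what such a configuration might look like (the `time_period` key matches
the schema fragment further below; the size key name is an assumption):

```yaml
# Illustrative only: key names other than time_period are assumptions.
media_upload_limits:
  - time_period: 1d
    max_size: 1G
  - time_period: 1w
    max_size: 5G
```
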
@@ -252,7 +252,7 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$
^/_matrix/client/(r0|v3|unstable)/notifications$
^/_synapse/admin/v1/rooms/[^/]+$
^/_synapse/admin/v1/rooms/

# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$

174 poetry.lock generated
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "annotated-types"
|
||||
@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
|
||||
|
||||
[[package]]
|
||||
name = "authlib"
|
||||
version = "1.6.3"
|
||||
version = "1.6.1"
|
||||
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
|
||||
optional = true
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
|
||||
files = [
|
||||
{file = "authlib-1.6.3-py2.py3-none-any.whl", hash = "sha256:7ea0f082edd95a03b7b72edac65ec7f8f68d703017d7e37573aee4fc603f2a48"},
|
||||
{file = "authlib-1.6.3.tar.gz", hash = "sha256:9f7a982cc395de719e4c2215c5707e7ea690ecf84f1ab126f28c053f4219e610"},
|
||||
{file = "authlib-1.6.1-py2.py3-none-any.whl", hash = "sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e"},
|
||||
{file = "authlib-1.6.1.tar.gz", hash = "sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -435,7 +435,7 @@ description = "XML bomb protection for Python stdlib modules"
|
||||
optional = true
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
|
||||
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
|
||||
@@ -460,7 +460,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
|
||||
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
|
||||
@@ -511,7 +511,7 @@ description = "Python wrapper for hiredis"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"redis\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"redis\""
|
||||
files = [
|
||||
{file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:add17efcbae46c5a6a13b244ff0b4a8fa079602ceb62290095c941b42e9d5dec"},
|
||||
{file = "hiredis-3.2.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:5fe955cc4f66c57df1ae8e5caf4de2925d43b5efab4e40859662311d1bcc5f54"},
|
||||
@@ -848,7 +848,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
|
||||
]
|
||||
@@ -919,14 +919,14 @@ i18n = ["Babel (>=2.7)"]
|
||||
|
||||
[[package]]
|
||||
name = "jsonschema"
|
||||
version = "4.25.1"
|
||||
version = "4.25.0"
|
||||
description = "An implementation of JSON Schema validation for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63"},
|
||||
{file = "jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85"},
|
||||
{file = "jsonschema-4.25.0-py3-none-any.whl", hash = "sha256:24c2e8da302de79c8b9382fee3e76b355e44d2a4364bb207159ce10b517bd716"},
|
||||
{file = "jsonschema-4.25.0.tar.gz", hash = "sha256:e63acf5c11762c0e6672ffb61482bdf57f0876684d8d249c0fe2d730d48bc55f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -986,7 +986,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
|
||||
files = [
|
||||
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
|
||||
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
|
||||
@@ -1002,7 +1002,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"url-preview\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"url-preview\""
|
||||
files = [
|
||||
{file = "lxml-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:35bc626eec405f745199200ccb5c6b36f202675d204aa29bb52e27ba2b71dea8"},
|
||||
{file = "lxml-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:246b40f8a4aec341cbbf52617cad8ab7c888d944bfe12a6abd2b1f6cfb6f6082"},
|
||||
@@ -1038,10 +1038,12 @@ files = [
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:219e0431ea8006e15005767f0351e3f7f9143e793e58519dc97fe9e07fae5563"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bd5913b4972681ffc9718bc2d4c53cde39ef81415e1671ff93e9aa30b46595e7"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:390240baeb9f415a82eefc2e13285016f9c8b5ad71ec80574ae8fa9605093cd7"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d6e200909a119626744dd81bae409fc44134389e03fbf1d68ed2a55a2fb10991"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ca50bd612438258a91b5b3788c6621c1f05c8c478e7951899f492be42defc0da"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:c24b8efd9c0f62bad0439283c2c795ef916c5a6b75f03c17799775c7ae3c0c9e"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:afd27d8629ae94c5d863e32ab0e1d5590371d296b87dae0a751fb22bf3685741"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:54c4855eabd9fc29707d30141be99e5cd1102e7d2258d2892314cf4c110726c3"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c907516d49f77f6cd8ead1322198bdfd902003c3c330c77a1c5f3cc32a0e4d16"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36531f81c8214e293097cd2b7873f178997dae33d3667caaae8bdfb9666b76c0"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-win32.whl", hash = "sha256:690b20e3388a7ec98e899fd54c924e50ba6693874aa65ef9cb53de7f7de9d64a"},
|
||||
{file = "lxml-6.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:310b719b695b3dd442cdfbbe64936b2f2e231bb91d998e99e6f0daf991a3eba3"},
|
||||
@@ -1052,10 +1054,12 @@ files = [
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d18a25b19ca7307045581b18b3ec9ead2b1db5ccd8719c291f0cd0a5cec6cb81"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d4f0c66df4386b75d2ab1e20a489f30dc7fd9a06a896d64980541506086be1f1"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f4b481b6cc3a897adb4279216695150bbe7a44c03daba3c894f49d2037e0a24"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8a78d6c9168f5bcb20971bf3329c2b83078611fbe1f807baadc64afc70523b3a"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae06fbab4f1bb7db4f7c8ca9897dc8db4447d1a2b9bee78474ad403437bcc29"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:1fa377b827ca2023244a06554c6e7dc6828a10aaf74ca41965c5d8a4925aebb4"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1676b56d48048a62ef77a250428d1f31f610763636e0784ba67a9740823988ca"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:0e32698462aacc5c1cf6bdfebc9c781821b7e74c79f13e5ffc8bfe27c42b1abf"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4d6036c3a296707357efb375cfc24bb64cd955b9ec731abf11ebb1e40063949f"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7488a43033c958637b1a08cddc9188eb06d3ad36582cebc7d4815980b47e27ef"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-win32.whl", hash = "sha256:5fcd7d3b1d8ecb91445bd71b9c88bdbeae528fefee4f379895becfc72298d181"},
|
||||
{file = "lxml-6.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:2f34687222b78fff795feeb799a7d44eca2477c3d9d3a46ce17d51a4f383e32e"},
|
||||
@@ -1239,7 +1243,7 @@ description = "An LDAP3 auth provider for Synapse"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
|
||||
files = [
|
||||
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
|
||||
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
|
||||
@@ -1478,7 +1482,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
|
||||
]
|
||||
@@ -1527,14 +1531,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "phonenumbers"
|
||||
version = "9.0.13"
|
||||
version = "9.0.11"
|
||||
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "phonenumbers-9.0.13-py2.py3-none-any.whl", hash = "sha256:b97661e177773e7509c6d503e0f537cd0af22aa3746231654590876eb9430915"},
|
||||
{file = "phonenumbers-9.0.13.tar.gz", hash = "sha256:eca06e01382412c45316868f86a44bb217c02f9ee7196589041556a2f54a7639"},
|
||||
{file = "phonenumbers-9.0.11-py2.py3-none-any.whl", hash = "sha256:a8ebb2136f1f14dfdbadb98be01cb71b96f880dea011eb5e0921967fe3a23abf"},
|
||||
{file = "phonenumbers-9.0.11.tar.gz", hash = "sha256:6573858dcf0a7a2753a071375e154d9fc11791546c699b575af95d2ba7d84a1d"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1547,6 +1551,8 @@ groups = ["main"]
|
||||
files = [
|
||||
{file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"},
|
||||
{file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"},
|
||||
@@ -1556,6 +1562,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"},
|
||||
{file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"},
|
||||
@@ -1565,6 +1573,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"},
|
||||
{file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"},
|
||||
@@ -1577,6 +1587,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"},
|
||||
{file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"},
|
||||
@@ -1586,6 +1598,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"},
|
||||
{file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"},
|
||||
@@ -1595,6 +1609,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"},
|
||||
{file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"},
|
||||
@@ -1604,6 +1620,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"},
|
||||
{file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"},
|
||||
@@ -1613,6 +1631,8 @@ files = [
|
||||
{file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"},
|
||||
{file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"},
|
||||
@@ -1622,11 +1642,15 @@ files = [
|
||||
{file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"},
|
||||
{file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"},
|
||||
{file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"},
|
||||
@@ -1664,7 +1688,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
|
||||
optional = true
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"postgres\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"postgres\""
|
||||
files = [
|
||||
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
|
||||
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
|
||||
@@ -1672,6 +1696,7 @@ files = [
|
||||
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
|
||||
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
|
||||
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
|
||||
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
|
||||
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
|
||||
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
|
||||
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
|
||||
@@ -1684,7 +1709,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
|
||||
files = [
|
||||
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
|
||||
]
|
||||
@@ -1700,7 +1725,7 @@ description = "A Simple library to enable psycopg2 compatability"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
|
||||
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
|
||||
files = [
|
||||
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
|
||||
]
|
||||
@@ -1749,14 +1774,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.11.9"
|
||||
version = "2.11.7"
|
||||
description = "Data validation using Python type hints"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2"},
|
||||
{file = "pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2"},
|
||||
{file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"},
|
||||
{file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1959,7 +1984,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"cache-memory\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"cache-memory\""
|
||||
files = [
|
||||
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
|
||||
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
|
||||
@@ -2019,7 +2044,7 @@ description = "Python implementation of SAML Version 2 Standard"
|
||||
optional = true
|
||||
python-versions = ">=3.9,<4.0"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
|
||||
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
|
||||
@@ -2044,7 +2069,7 @@ description = "Extensions to the standard Python datetime module"
|
||||
optional = true
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
|
||||
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
|
||||
@@ -2072,7 +2097,7 @@ description = "World timezone definitions, modern and historical"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
|
||||
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
|
||||
@@ -2371,31 +2396,30 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.12.10"
|
||||
version = "0.12.7"
|
||||
description = "An extremely fast Python linter and code formatter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b"},
|
||||
{file = "ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1"},
|
||||
{file = "ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9"},
|
||||
{file = "ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a"},
|
||||
{file = "ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60"},
|
||||
{file = "ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56"},
|
||||
{file = "ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9"},
|
||||
{file = "ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b"},
|
||||
{file = "ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266"},
|
||||
{file = "ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e"},
|
||||
{file = "ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc"},
|
||||
{file = "ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9"},
|
||||
{file = "ruff-0.12.7-py3-none-linux_armv6l.whl", hash = "sha256:76e4f31529899b8c434c3c1dede98c4483b89590e15fb49f2d46183801565303"},
|
||||
{file = "ruff-0.12.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:789b7a03e72507c54fb3ba6209e4bb36517b90f1a3569ea17084e3fd295500fb"},
|
||||
{file = "ruff-0.12.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e1c2a3b8626339bb6369116e7030a4cf194ea48f49b64bb505732a7fce4f4e3"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32dec41817623d388e645612ec70d5757a6d9c035f3744a52c7b195a57e03860"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47ef751f722053a5df5fa48d412dbb54d41ab9b17875c6840a58ec63ff0c247c"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a828a5fc25a3efd3e1ff7b241fd392686c9386f20e5ac90aa9234a5faa12c423"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5726f59b171111fa6a69d82aef48f00b56598b03a22f0f4170664ff4d8298efb"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74e6f5c04c4dd4aba223f4fe6e7104f79e0eebf7d307e4f9b18c18362124bccd"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0bfe4e77fba61bf2ccadf8cf005d6133e3ce08793bbe870dd1c734f2699a3e"},
|
||||
{file = "ruff-0.12.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06bfb01e1623bf7f59ea749a841da56f8f653d641bfd046edee32ede7ff6c606"},
|
||||
{file = "ruff-0.12.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e41df94a957d50083fd09b916d6e89e497246698c3f3d5c681c8b3e7b9bb4ac8"},
|
||||
{file = "ruff-0.12.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4000623300563c709458d0ce170c3d0d788c23a058912f28bbadc6f905d67afa"},
|
||||
{file = "ruff-0.12.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:69ffe0e5f9b2cf2b8e289a3f8945b402a1b19eff24ec389f45f23c42a3dd6fb5"},
|
||||
{file = "ruff-0.12.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a07a5c8ffa2611a52732bdc67bf88e243abd84fe2d7f6daef3826b59abbfeda4"},
|
||||
{file = "ruff-0.12.7-py3-none-win32.whl", hash = "sha256:c928f1b2ec59fb77dfdf70e0419408898b63998789cc98197e15f560b9e77f77"},
|
||||
{file = "ruff-0.12.7-py3-none-win_amd64.whl", hash = "sha256:9c18f3d707ee9edf89da76131956aba1270c6348bfee8f6c647de841eac7194f"},
|
||||
{file = "ruff-0.12.7-py3-none-win_arm64.whl", hash = "sha256:dfce05101dbd11833a0776716d5d1578641b7fddb537fe7fa956ab85d1769b69"},
|
||||
{file = "ruff-0.12.7.tar.gz", hash = "sha256:1fc3193f238bc2d7968772c82831a4ff69252f673be371fb49663f0068b7ec71"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2438,7 +2462,7 @@ description = "Python client for Sentry (https://sentry.io)"
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"sentry\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"sentry\""
|
||||
files = [
|
||||
{file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"},
|
||||
{file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"},
|
||||
@@ -2626,7 +2650,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
|
||||
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
|
||||
@@ -2642,7 +2666,7 @@ description = "Python bindings for the Apache Thrift RPC system"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
|
||||
]
|
||||
@@ -2704,7 +2728,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
|
||||
optional = true
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"opentracing\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"opentracing\""
|
||||
files = [
|
||||
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
|
||||
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
|
||||
@@ -2722,14 +2746,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "towncrier"
|
||||
version = "25.8.0"
|
||||
version = "24.8.0"
|
||||
description = "Building newsfiles for your project."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
python-versions = ">=3.8"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "towncrier-25.8.0-py3-none-any.whl", hash = "sha256:b953d133d98f9aeae9084b56a3563fd2519dfc6ec33f61c9cd2c61ff243fb513"},
|
||||
{file = "towncrier-25.8.0.tar.gz", hash = "sha256:eef16d29f831ad57abb3ae32a0565739866219f1ebfbdd297d32894eb9940eb1"},
|
||||
{file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"},
|
||||
{file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2841,7 +2865,7 @@ description = "non-blocking redis client for python"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"redis\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"redis\""
|
||||
files = [
|
||||
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
|
||||
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
|
||||
@@ -2853,14 +2877,14 @@ twisted = "*"
|
||||
|
||||
[[package]]
|
||||
name = "types-bleach"
|
||||
version = "6.2.0.20250809"
|
||||
version = "6.2.0.20250514"
|
||||
description = "Typing stubs for bleach"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_bleach-6.2.0.20250809-py3-none-any.whl", hash = "sha256:0b372a75117947d9ac8a31ae733fd0f8d92ec75c4772e7b37093ba3fa5b48fb9"},
|
||||
{file = "types_bleach-6.2.0.20250809.tar.gz", hash = "sha256:188d7a1119f6c953140b513ed57ba4213755695815472c19d0c22ac09c79b90b"},
|
||||
{file = "types_bleach-6.2.0.20250514-py3-none-any.whl", hash = "sha256:380cb74f0db1e3c3b2e0cde217221108e975e07e95ef0970c9d41f7cd4e8ea3c"},
|
||||
{file = "types_bleach-6.2.0.20250514.tar.gz", hash = "sha256:38c2e51d9cac51dc70c1b66121a11f4dad8bbf47fbad494bb7a77d8b8f3c4323"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2895,14 +2919,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-jsonschema"
|
||||
version = "4.25.1.20250822"
|
||||
version = "4.25.0.20250720"
|
||||
description = "Typing stubs for jsonschema"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_jsonschema-4.25.1.20250822-py3-none-any.whl", hash = "sha256:f82c2d7fa1ce1c0b84ba1de4ed6798469768188884db04e66421913a4e181294"},
|
||||
{file = "types_jsonschema-4.25.1.20250822.tar.gz", hash = "sha256:aac69ed4b23f49aaceb7fcb834141d61b9e4e6a7f6008cb2f0d3b831dfa8464a"},
|
||||
{file = "types_jsonschema-4.25.0.20250720-py3-none-any.whl", hash = "sha256:7d7897c715310d8bf9ae27a2cedba78bbb09e4cad83ce06d2aa79b73a88941df"},
|
||||
{file = "types_jsonschema-4.25.0.20250720.tar.gz", hash = "sha256:765a3b6144798fe3161fd8cbe570a756ed3e8c0e5adb7c09693eb49faad39dbd"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2946,14 +2970,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-psycopg2"
|
||||
version = "2.9.21.20250915"
|
||||
version = "2.9.21.20250718"
|
||||
description = "Typing stubs for psycopg2"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_psycopg2-2.9.21.20250915-py3-none-any.whl", hash = "sha256:eefe5ccdc693fc086146e84c9ba437bb278efe1ef330b299a0cb71169dc6c55f"},
|
||||
{file = "types_psycopg2-2.9.21.20250915.tar.gz", hash = "sha256:bfeb8f54c32490e7b5edc46215ab4163693192bc90407b4a023822de9239f5c8"},
|
||||
{file = "types_psycopg2-2.9.21.20250718-py3-none-any.whl", hash = "sha256:bcf085d4293bda48f5943a46dadf0389b2f98f7e8007722f7e1c12ee0f541858"},
|
||||
{file = "types_psycopg2-2.9.21.20250718.tar.gz", hash = "sha256:dc09a97272ef67e739e57b9f4740b761208f4514257e311c0b05c8c7a37d04b4"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2986,14 +3010,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-requests"
|
||||
version = "2.32.4.20250809"
|
||||
version = "2.32.4.20250611"
|
||||
description = "Typing stubs for requests"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163"},
|
||||
{file = "types_requests-2.32.4.20250809.tar.gz", hash = "sha256:d8060de1c8ee599311f56ff58010fb4902f462a1470802cf9f6ed27bc46c4df3"},
|
||||
{file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"},
|
||||
{file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3001,14 +3025,14 @@ urllib3 = ">=2"
|
||||
|
||||
[[package]]
|
||||
name = "types-setuptools"
|
||||
version = "80.9.0.20250822"
|
||||
version = "80.9.0.20250809"
|
||||
description = "Typing stubs for setuptools"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "types_setuptools-80.9.0.20250822-py3-none-any.whl", hash = "sha256:53bf881cb9d7e46ed12c76ef76c0aaf28cfe6211d3fab12e0b83620b1a8642c3"},
|
||||
{file = "types_setuptools-80.9.0.20250822.tar.gz", hash = "sha256:070ea7716968ec67a84c7f7768d9952ff24d28b65b6594797a464f1b3066f965"},
|
||||
{file = "types_setuptools-80.9.0.20250809-py3-none-any.whl", hash = "sha256:7c6539b4c7ac7b4ab4db2be66d8a58fb1e28affa3ee3834be48acafd94f5976a"},
|
||||
{file = "types_setuptools-80.9.0.20250809.tar.gz", hash = "sha256:e986ba37ffde364073d76189e1d79d9928fb6f5278c7d07589cde353d0218864"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3087,7 +3111,7 @@ description = "An XML Schema validator and decoder"
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"saml2\" or extra == \"all\""
|
||||
markers = "extra == \"all\" or extra == \"saml2\""
|
||||
files = [
|
||||
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
|
||||
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
|
||||
@@ -3231,4 +3255,4 @@ url-preview = ["lxml"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = "^3.9.0"
|
||||
content-hash = "8783bfa1c998c4cf854e173b3f6745b0e21e655e0c24a8f9cda4be5d7375dc19"
|
||||
content-hash = "600a349d08dde732df251583094a121b5385eb43ae0c6ceff10dcf9749359446"
|
||||
|
||||
@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.138.0"
version = "1.136.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -224,7 +224,7 @@ matrix-common = "^1.3.0"
packaging = ">=20.0"
# We support pydantic v1 and pydantic v2 via the pydantic.v1 compat module.
# See https://github.com/matrix-org/synapse/issues/15858
pydantic = ">=2.0.0, <3"
pydantic = ">=1.7.4, <3"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@@ -324,7 +324,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.12.10"
ruff = "0.12.7"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

@@ -289,10 +289,10 @@ pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
    default_enabled: true,
}];

pub const BASE_APPEND_POSTCONTENT_RULES: &[PushRule] = &[
pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
    PushRule {
        rule_id: Cow::Borrowed("global/postcontent/.io.element.msc4306.rule.unsubscribed_thread"),
        priority_class: 6,
        rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.unsubscribed_thread"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[Condition::Known(
            KnownCondition::Msc4306ThreadSubscription { subscribed: false },
        )]),
@@ -301,8 +301,8 @@ pub const BASE_APPEND_POSTCONTENT_RULES: &[PushRule] = &[
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/postcontent/.io.element.msc4306.rule.subscribed_thread"),
        priority_class: 6,
        rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.subscribed_thread"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[Condition::Known(
            KnownCondition::Msc4306ThreadSubscription { subscribed: true },
        )]),
@@ -310,9 +310,6 @@ pub const BASE_APPEND_POSTCONTENT_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
];

pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
        priority_class: 1,
@@ -729,7 +726,6 @@ lazy_static! {
        .iter()
        .chain(BASE_APPEND_OVERRIDE_RULES.iter())
        .chain(BASE_APPEND_CONTENT_RULES.iter())
        .chain(BASE_APPEND_POSTCONTENT_RULES.iter())
        .chain(BASE_APPEND_UNDERRIDE_RULES.iter())
        .map(|rule| { (&*rule.rule_id, rule) })
        .collect();

@@ -527,7 +527,6 @@ impl PushRules {
            .chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
            .chain(self.content.iter())
            .chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
            .chain(base_rules::BASE_APPEND_POSTCONTENT_RULES.iter())
            .chain(self.room.iter())
            .chain(self.sender.iter())
            .chain(self.underride.iter())

@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.138/synapse-config.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.136/synapse-config.schema.json
type: object
properties:
  modules:
@@ -2415,15 +2415,8 @@ properties:
      A list of media upload limits defining how much data a given user can
      upload in a given time period.

      These limits are applied in addition to the `max_upload_size` limit above
      (which applies to individual uploads).

      An empty list means no limits are applied.

      These settings can be overridden using the `get_media_upload_limits_for_user`
      module API [callback](../../modules/media_repository_callbacks.md#get_media_upload_limits_for_user).
    default: []
    items:
      time_period:

478 scripts-dev/check_pydantic_models.py Executable file
@@ -0,0 +1,478 @@
|
||||
#! /usr/bin/env python
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
# Copyright (C) 2023 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
# Originally licensed under the Apache License, Version 2.0:
|
||||
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
||||
#
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
"""
|
||||
A script which enforces that Synapse always uses strict types when defining a Pydantic
|
||||
model.
|
||||
|
||||
Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. See
|
||||
|
||||
https://github.com/pydantic/pydantic/issues/1098
|
||||
https://pydantic-docs.helpmanual.io/blog/pydantic-v2/#strict-mode
|
||||
|
||||
until then, this script is a best effort to stop us from introducing type coercion bugs
|
||||
(like the infamous stringy power levels fixed in room version 10).
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import contextlib
|
||||
import functools
|
||||
import importlib
|
||||
import logging
|
||||
import os
|
||||
import pkgutil
|
||||
import sys
|
||||
import textwrap
|
||||
import traceback
|
||||
import unittest.mock
|
||||
from contextlib import contextmanager
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
Generator,
|
||||
List,
|
||||
Set,
|
||||
Type,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
from parameterized import parameterized
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
from synapse._pydantic_compat import (
|
||||
BaseModel as PydanticBaseModel,
|
||||
conbytes,
|
||||
confloat,
|
||||
conint,
|
||||
constr,
|
||||
get_args,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [
|
||||
constr,
|
||||
conbytes,
|
||||
conint,
|
||||
confloat,
|
||||
]
|
||||
|
||||
TYPES_THAT_PYDANTIC_WILL_COERCE_TO = [
|
||||
str,
|
||||
bytes,
|
||||
int,
|
||||
float,
|
||||
bool,
|
||||
]
|
||||
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
|
||||
|
||||
class ModelCheckerException(Exception):
|
||||
"""Dummy exception. Allows us to detect unwanted types during a module import."""
|
||||
|
||||
|
||||
class MissingStrictInConstrainedTypeException(ModelCheckerException):
|
||||
factory_name: str
|
||||
|
||||
def __init__(self, factory_name: str):
|
||||
self.factory_name = factory_name
|
||||
|
||||
|
||||
class FieldHasUnwantedTypeException(ModelCheckerException):
|
||||
message: str
|
||||
|
||||
def __init__(self, message: str):
|
||||
self.message = message
|
||||
|
||||
|
||||
def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
|
||||
"""We patch `constr` and friends with wrappers that enforce strict=True."""
|
||||
|
||||
@functools.wraps(factory)
|
||||
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
|
||||
if "strict" not in kwargs:
|
||||
raise MissingStrictInConstrainedTypeException(factory.__name__)
|
||||
if not kwargs["strict"]:
|
||||
raise MissingStrictInConstrainedTypeException(factory.__name__)
|
||||
return factory(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def field_type_unwanted(type_: Any) -> bool:
    """Very rough attempt to detect if a type is unwanted as a Pydantic annotation.

    At present, we exclude types which will coerce, or any generic type involving types
    which will coerce."""
    logger.debug("Is %s unwanted?", type_)
    if type_ in TYPES_THAT_PYDANTIC_WILL_COERCE_TO:
        logger.debug("yes")
        return True
    logger.debug("Maybe. Subargs are %s", get_args(type_))
    rv = any(field_type_unwanted(t) for t in get_args(type_))
    logger.debug("Conclusion: %s %s unwanted", type_, "is" if rv else "is not")
    return rv
|
||||
|
||||
|
||||
class PatchedBaseModel(PydanticBaseModel):
|
||||
"""A patched version of BaseModel that inspects fields after models are defined.
|
||||
|
||||
We complain loudly if we see an unwanted type.
|
||||
|
||||
Beware: ModelField.type_ is presumably private; this is likely to be very brittle.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def __init_subclass__(cls: Type[PydanticBaseModel], **kwargs: object):
|
||||
for field in cls.__fields__.values():
|
||||
# Note that field.type_ and field.outer_type are computed based on the
|
||||
# annotation type, see pydantic.fields.ModelField._type_analysis
|
||||
if field_type_unwanted(field.outer_type_):
|
||||
# TODO: this only reports the first bad field. Can we find all bad ones
|
||||
# and report them all?
|
||||
raise FieldHasUnwantedTypeException(
|
||||
f"{cls.__module__}.{cls.__qualname__} has field '{field.name}' "
|
||||
f"with unwanted type `{field.outer_type_}`"
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
def monkeypatch_pydantic() -> Generator[None, None, None]:
    """Patch pydantic with our snooping versions of BaseModel and the con* functions.

    If the snooping functions see something they don't like, they'll raise a
    ModelCheckerException instance.
    """
|
||||
with contextlib.ExitStack() as patches:
|
||||
# Most Synapse code ought to import the patched objects directly from
|
||||
# `pydantic`. But we also patch their containing modules `pydantic.main` and
|
||||
# `pydantic.types` for completeness.
|
||||
patch_basemodel = unittest.mock.patch(
|
||||
"synapse._pydantic_compat.BaseModel", new=PatchedBaseModel
|
||||
)
|
||||
patches.enter_context(patch_basemodel)
|
||||
for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG:
|
||||
wrapper: Callable = make_wrapper(factory)
|
||||
patch = unittest.mock.patch(
|
||||
f"synapse._pydantic_compat.{factory.__name__}", new=wrapper
|
||||
)
|
||||
patches.enter_context(patch)
|
||||
yield
|
||||
|
||||
|
||||
def format_model_checker_exception(e: ModelCheckerException) -> str:
|
||||
"""Work out which line of code caused e. Format the line in a human-friendly way."""
|
||||
# TODO. FieldHasUnwantedTypeException gives better error messages. Can we ditch the
|
||||
# patches of constr() etc, and instead inspect fields to look for ConstrainedStr
|
||||
# with strict=False? There is some difficulty with the inheritance hierarchy
|
||||
# because StrictStr < ConstrainedStr < str.
|
||||
if isinstance(e, FieldHasUnwantedTypeException):
|
||||
return e.message
|
||||
elif isinstance(e, MissingStrictInConstrainedTypeException):
|
||||
frame_summary = traceback.extract_tb(e.__traceback__)[-2]
|
||||
return (
|
||||
f"Missing `strict=True` from {e.factory_name}() call \n"
|
||||
+ traceback.format_list([frame_summary])[0].lstrip()
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown exception {e}") from e
|
||||
|
||||
|
||||
def lint() -> int:
|
||||
"""Try to import all of Synapse and see if we spot any Pydantic type coercions.
|
||||
|
||||
Print any problems, then return a status code suitable for sys.exit."""
|
||||
failures = do_lint()
|
||||
if failures:
|
||||
print(f"Found {len(failures)} problem(s)")
|
||||
for failure in sorted(failures):
|
||||
print(failure)
|
||||
return os.EX_DATAERR if failures else os.EX_OK
|
||||
|
||||
|
||||
def do_lint() -> Set[str]:
|
||||
"""Try to import all of Synapse and see if we spot any Pydantic type coercions."""
|
||||
failures = set()
|
||||
|
||||
with monkeypatch_pydantic():
|
||||
logger.debug("Importing synapse")
|
||||
try:
|
||||
# TODO: make "synapse" an argument so we can target this script at
|
||||
# a subpackage
|
||||
module = importlib.import_module("synapse")
|
||||
except ModelCheckerException as e:
|
||||
logger.warning("Bad annotation found when importing synapse")
|
||||
failures.add(format_model_checker_exception(e))
|
||||
return failures
|
||||
|
||||
try:
|
||||
logger.debug("Fetching subpackages")
|
||||
module_infos = list(
|
||||
pkgutil.walk_packages(module.__path__, f"{module.__name__}.")
|
||||
)
|
||||
except ModelCheckerException as e:
|
||||
logger.warning("Bad annotation found when looking for modules to import")
|
||||
failures.add(format_model_checker_exception(e))
|
||||
return failures
|
||||
|
||||
for module_info in module_infos:
|
||||
logger.debug("Importing %s", module_info.name)
|
||||
try:
|
||||
importlib.import_module(module_info.name)
|
||||
except ModelCheckerException as e:
|
||||
logger.warning(
|
||||
"Bad annotation found when importing %s", module_info.name
|
||||
)
|
||||
failures.add(format_model_checker_exception(e))
|
||||
|
||||
return failures
|
||||
|
||||
|
||||
def run_test_snippet(source: str) -> None:
|
||||
"""Exec a snippet of source code in an isolated environment."""
|
||||
# To emulate `source` being called at the top level of the module,
|
||||
# the globals and locals we provide apparently have to be the same mapping.
|
||||
#
|
||||
# > Remember that at the module level, globals and locals are the same dictionary.
|
||||
# > If exec gets two separate objects as globals and locals, the code will be
|
||||
# > executed as if it were embedded in a class definition.
|
||||
globals_: Dict[str, object]
|
||||
locals_: Dict[str, object]
|
||||
globals_ = locals_ = {}
|
||||
exec(textwrap.dedent(source), globals_, locals_)
|
||||
|
||||
|
||||
class TestConstrainedTypesPatch(unittest.TestCase):
|
||||
def test_expression_without_strict_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import constr
|
||||
except ImportError:
|
||||
from pydantic import constr
|
||||
constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_called_as_module_attribute_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
import pydantic
|
||||
pydantic.constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_wildcard_import_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import *
|
||||
except ImportError:
|
||||
from pydantic import *
|
||||
constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_alternative_import_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1.types import constr
|
||||
except ImportError:
|
||||
from pydantic.types import constr
|
||||
constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_alternative_import_attribute_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import types as pydantic_types
|
||||
except ImportError:
|
||||
from pydantic import types as pydantic_types
|
||||
pydantic_types.constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_kwarg_but_no_strict_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import constr
|
||||
except ImportError:
|
||||
from pydantic import constr
|
||||
constr(min_length=10)
|
||||
"""
|
||||
)
|
||||
|
||||
def test_kwarg_strict_False_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import constr
|
||||
except ImportError:
|
||||
from pydantic import constr
|
||||
constr(strict=False)
|
||||
"""
|
||||
)
|
||||
|
||||
def test_kwarg_strict_True_doesnt_raise(self) -> None:
|
||||
with monkeypatch_pydantic():
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import constr
|
||||
except ImportError:
|
||||
from pydantic import constr
|
||||
constr(strict=True)
|
||||
"""
|
||||
)
|
||||
|
||||
def test_annotation_without_strict_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import constr
|
||||
except ImportError:
|
||||
from pydantic import constr
|
||||
x: constr()
|
||||
"""
|
||||
)
|
||||
|
||||
def test_field_annotation_without_strict_raises(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1 import BaseModel, conint
|
||||
except ImportError:
|
||||
from pydantic import BaseModel, conint
|
||||
class C:
|
||||
x: conint()
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
class TestFieldTypeInspection(unittest.TestCase):
|
||||
@parameterized.expand(
|
||||
[
|
||||
("str",),
|
||||
("bytes"),
|
||||
("int",),
|
||||
("float",),
|
||||
("bool"),
|
||||
("Optional[str]",),
|
||||
("Union[None, str]",),
|
||||
("List[str]",),
|
||||
("List[List[str]]",),
|
||||
("Dict[StrictStr, str]",),
|
||||
("Dict[str, StrictStr]",),
|
||||
("TypedDict('D', x=int)",),
|
||||
]
|
||||
)
|
||||
def test_field_holding_unwanted_type_raises(self, annotation: str) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
f"""
|
||||
from typing import *
|
||||
try:
|
||||
from pydantic.v1 import *
|
||||
except ImportError:
|
||||
from pydantic import *
|
||||
class C(BaseModel):
|
||||
f: {annotation}
|
||||
"""
|
||||
)
|
||||
|
||||
@parameterized.expand(
|
||||
[
|
||||
("StrictStr",),
|
||||
("StrictBytes"),
|
||||
("StrictInt",),
|
||||
("StrictFloat",),
|
||||
("StrictBool"),
|
||||
("constr(strict=True, min_length=10)",),
|
||||
("Optional[StrictStr]",),
|
||||
("Union[None, StrictStr]",),
|
||||
("List[StrictStr]",),
|
||||
("List[List[StrictStr]]",),
|
||||
("Dict[StrictStr, StrictStr]",),
|
||||
("TypedDict('D', x=StrictInt)",),
|
||||
]
|
||||
)
|
||||
def test_field_holding_accepted_type_doesnt_raise(self, annotation: str) -> None:
|
||||
with monkeypatch_pydantic():
|
||||
run_test_snippet(
|
||||
f"""
|
||||
from typing import *
|
||||
try:
|
||||
from pydantic.v1 import *
|
||||
except ImportError:
|
||||
from pydantic import *
|
||||
class C(BaseModel):
|
||||
f: {annotation}
|
||||
"""
|
||||
)
|
||||
|
||||
def test_field_holding_str_raises_with_alternative_import(self) -> None:
|
||||
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
|
||||
run_test_snippet(
|
||||
"""
|
||||
try:
|
||||
from pydantic.v1.main import BaseModel
|
||||
except ImportError:
|
||||
from pydantic.main import BaseModel
|
||||
class C(BaseModel):
|
||||
f: str
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("mode", choices=["lint", "test"], default="lint", nargs="?")
|
||||
parser.add_argument("-v", "--verbose", action="store_true")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parser.parse_args(sys.argv[1:])
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s",
|
||||
level=logging.DEBUG if args.verbose else logging.INFO,
|
||||
)
|
||||
# suppress logs we don't care about
|
||||
logging.getLogger("xmlschema").setLevel(logging.WARNING)
|
||||
if args.mode == "lint":
|
||||
sys.exit(lint())
|
||||
elif args.mode == "test":
|
||||
unittest.main(argv=sys.argv[:1])
|
||||
@@ -230,7 +230,6 @@ test_packages=(
|
||||
./tests/msc3967
|
||||
./tests/msc4140
|
||||
./tests/msc4155
|
||||
./tests/msc4306
|
||||
)
|
||||
|
||||
# Enable dirty runs, so tests will reuse the same container where possible.
|
||||
|
||||
@@ -134,6 +134,9 @@ fi
|
||||
# Ensure the formatting of Rust code.
|
||||
cargo-fmt
|
||||
|
||||
# Ensure all Pydantic models use strict types.
|
||||
./scripts-dev/check_pydantic_models.py lint
|
||||
|
||||
# Ensure type hints are correct.
|
||||
mypy
|
||||
|
||||
|
||||
@@ -153,13 +153,9 @@ def get_registered_paths_for_default(
|
||||
"""
|
||||
|
||||
hs = MockHomeserver(base_config, worker_app)
|
||||
|
||||
# TODO We only do this to avoid an error, but don't need the database etc
|
||||
hs.setup()
|
||||
registered_paths = get_registered_paths_for_hs(hs)
|
||||
hs.cleanup()
|
||||
|
||||
return registered_paths
|
||||
return get_registered_paths_for_hs(hs)
|
||||
|
||||
|
||||
def elide_http_methods_if_unconflicting(
|
||||
|
||||
@@ -99,7 +99,6 @@ from synapse.storage.engines import create_engine
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
from synapse.types import ISynapseReactor
|
||||
from synapse.util import SYNAPSE_VERSION, Clock
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
# Cast safety: Twisted does some naughty magic which replaces the
|
||||
# twisted.internet.reactor module with a Reactor instance at runtime.
|
||||
@@ -324,7 +323,6 @@ class MockHomeserver:
|
||||
self.config = config
|
||||
self.hostname = config.server.server_name
|
||||
self.version_string = SYNAPSE_VERSION
|
||||
self.instance_id = random_string(5)
|
||||
|
||||
def get_clock(self) -> Clock:
|
||||
return self.clock
|
||||
@@ -332,9 +330,6 @@ class MockHomeserver:
|
||||
def get_reactor(self) -> ISynapseReactor:
|
||||
return reactor
|
||||
|
||||
def get_instance_id(self) -> str:
|
||||
return self.instance_id
|
||||
|
||||
def get_instance_name(self) -> str:
|
||||
return "master"
|
||||
|
||||
@@ -690,15 +685,7 @@ class Porter:
|
||||
)
|
||||
prepare_database(db_conn, engine, config=self.hs_config)
|
||||
# Type safety: ignore that we're using Mock homeservers here.
|
||||
store = Store(
|
||||
DatabasePool(
|
||||
hs, # type: ignore[arg-type]
|
||||
db_config,
|
||||
engine,
|
||||
),
|
||||
db_conn,
|
||||
hs, # type: ignore[arg-type]
|
||||
)
|
||||
store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) # type: ignore[arg-type]
|
||||
db_conn.commit()
|
||||
|
||||
return store
|
||||
|
||||
@@ -120,13 +120,6 @@ def main() -> None:
|
||||
# DB.
|
||||
hs.setup()
|
||||
|
||||
# This will cause all of the relevant storage classes to be instantiated and call
|
||||
# `register_background_update_handler(...)`,
|
||||
# `register_background_index_update(...)`,
|
||||
# `register_background_validate_constraint(...)`, etc so they are available to use
|
||||
# if we are asked to run those background updates.
|
||||
hs.get_storage_controllers()
|
||||
|
||||
if args.run_background_updates:
|
||||
run_background_updates(hs)
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Optional, Set
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
from synapse._pydantic_compat import (
|
||||
@@ -57,10 +57,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
# Scope as defined by MSC2967
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
|
||||
UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
|
||||
STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
|
||||
STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"
|
||||
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
|
||||
|
||||
|
||||
class ServerMetadata(BaseModel):
|
||||
@@ -336,10 +334,7 @@ class MasDelegatedAuth(BaseAuth):
|
||||
scope = introspection_result.get_scope_set()
|
||||
|
||||
# Determine type of user based on presence of particular scopes
|
||||
if (
|
||||
UNSTABLE_SCOPE_MATRIX_API not in scope
|
||||
and STABLE_SCOPE_MATRIX_API not in scope
|
||||
):
|
||||
if SCOPE_MATRIX_API not in scope:
|
||||
raise InvalidClientTokenError(
|
||||
"Token doesn't grant access to the Matrix C-S API"
|
||||
)
|
||||
@@ -371,12 +366,11 @@ class MasDelegatedAuth(BaseAuth):
|
||||
# We only allow a single device_id in the scope, so we find them all in the
|
||||
# scope list, and raise if there are more than one. The OIDC server should be
|
||||
# the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
|
||||
device_ids: Set[str] = set()
|
||||
for tok in scope:
|
||||
if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
|
||||
device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
|
||||
elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
|
||||
device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
|
||||
device_ids = [
|
||||
tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
|
||||
for tok in scope
|
||||
if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
|
||||
]
|
||||
|
||||
if len(device_ids) > 1:
|
||||
raise AuthError(
|
||||
@@ -384,7 +378,7 @@ class MasDelegatedAuth(BaseAuth):
|
||||
"Multiple device IDs in scope",
|
||||
)
|
||||
|
||||
device_id = next(iter(device_ids), None)
|
||||
device_id = device_ids[0] if device_ids else None
|
||||
|
||||
if device_id is not None:
|
||||
# Sanity check the device_id
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
from authlib.oauth2 import ClientAuth
|
||||
@@ -34,6 +34,7 @@ from synapse.api.errors import (
|
||||
AuthError,
|
||||
HttpResponseException,
|
||||
InvalidClientTokenError,
|
||||
OAuthInsufficientScopeError,
|
||||
SynapseError,
|
||||
UnrecognizedRequestError,
|
||||
)
|
||||
@@ -62,10 +63,9 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
# Scope as defined by MSC2967
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
|
||||
UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
|
||||
STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
|
||||
STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"
|
||||
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"
|
||||
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
|
||||
|
||||
# Scope which allows access to the Synapse admin API
|
||||
SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"
|
||||
@@ -444,6 +444,9 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
if not self._is_access_token_the_admin_token(access_token):
|
||||
await self._record_request(request, requester)
|
||||
|
||||
if not allow_guest and requester.is_guest:
|
||||
raise OAuthInsufficientScopeError([SCOPE_MATRIX_API])
|
||||
|
||||
request.requester = requester
|
||||
|
||||
return requester
|
||||
@@ -525,11 +528,10 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
scope: List[str] = introspection_result.get_scope_list()
|
||||
|
||||
# Determine type of user based on presence of particular scopes
|
||||
has_user_scope = (
|
||||
UNSTABLE_SCOPE_MATRIX_API in scope or STABLE_SCOPE_MATRIX_API in scope
|
||||
)
|
||||
has_user_scope = SCOPE_MATRIX_API in scope
|
||||
has_guest_scope = SCOPE_MATRIX_GUEST in scope
|
||||
|
||||
if not has_user_scope:
|
||||
if not has_user_scope and not has_guest_scope:
|
||||
raise InvalidClientTokenError("No scope in token granting user rights")
|
||||
|
||||
# Match via the sub claim
|
||||
@@ -577,12 +579,11 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
# We only allow a single device_id in the scope, so we find them all in the
|
||||
# scope list, and raise if there are more than one. The OIDC server should be
|
||||
# the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
|
||||
device_ids: Set[str] = set()
|
||||
for tok in scope:
|
||||
if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
|
||||
device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
|
||||
elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
|
||||
device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
|
||||
device_ids = [
|
||||
tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
|
||||
for tok in scope
|
||||
if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
|
||||
]
|
||||
|
||||
if len(device_ids) > 1:
|
||||
raise AuthError(
|
||||
@@ -590,7 +591,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
"Multiple device IDs in scope",
|
||||
)
|
||||
|
||||
device_id = next(iter(device_ids), None)
|
||||
device_id = device_ids[0] if device_ids else None
|
||||
|
||||
if device_id is not None:
|
||||
# Sanity check the device_id
|
||||
@@ -616,4 +617,5 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
scope=scope,
|
||||
is_guest=(has_guest_scope and not has_user_scope),
|
||||
)
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
"""Contains the URL paths to prefix various aspects of the server with."""
|
||||
|
||||
import hmac
|
||||
import urllib.parse
|
||||
from hashlib import sha256
|
||||
from typing import Optional
|
||||
from urllib.parse import urlencode, urljoin
|
||||
@@ -97,21 +96,11 @@ class LoginSSORedirectURIBuilder:
|
||||
serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url})
|
||||
|
||||
if idp_id:
|
||||
# Since this is a user-controlled string, make it safe to include in a URL path.
|
||||
url_encoded_idp_id = urllib.parse.quote(
|
||||
idp_id,
|
||||
# Since this defaults to `safe="/"`, we have to override it. We're
|
||||
# working with an individual URL path parameter so there shouldn't be
|
||||
# any slashes in it which could change the request path.
|
||||
safe="",
|
||||
encoding="utf8",
|
||||
)
|
||||
|
||||
resultant_url = urljoin(
|
||||
# We have to add a trailing slash to the base URL to ensure that the
|
||||
# last path segment is not stripped away when joining with another path.
|
||||
f"{base_url}/",
|
||||
f"{url_encoded_idp_id}?{serialized_query_parameters}",
|
||||
f"{idp_id}?{serialized_query_parameters}",
|
||||
)
|
||||
else:
|
||||
resultant_url = f"{base_url}?{serialized_query_parameters}"
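The trailing-slash behaviour described in the comment above can be checked in isolation; the hostname and path below are made up for illustration:

```python
from urllib.parse import urljoin

# Without a trailing slash on the base, urljoin *replaces* the last path segment:
print(urljoin("https://hs.example/_synapse/client/pick_idp", "oidc?redirectUrl=x"))
# -> https://hs.example/_synapse/client/oidc?redirectUrl=x

# With the trailing slash, the new segment is appended instead:
print(urljoin("https://hs.example/_synapse/client/pick_idp/", "oidc?redirectUrl=x"))
# -> https://hs.example/_synapse/client/pick_idp/oidc?redirectUrl=x
```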
|
||||
|
||||
@@ -72,7 +72,7 @@ from synapse.events.auto_accept_invites import InviteAutoAccepter
|
||||
from synapse.events.presence_router import load_legacy_presence_router
|
||||
from synapse.handlers.auth import load_legacy_password_auth_providers
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.logging.context import LoggingContext, PreserveLoggingContext
|
||||
from synapse.logging.context import PreserveLoggingContext
|
||||
from synapse.logging.opentracing import init_tracer
|
||||
from synapse.metrics import install_gc_manager, register_threadpool
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
@@ -183,23 +183,25 @@ def start_reactor(
|
||||
if gc_thresholds:
|
||||
gc.set_threshold(*gc_thresholds)
|
||||
install_gc_manager()
|
||||
run_command()
|
||||
|
||||
# Reset the logging context when we start the reactor (whenever we yield control
|
||||
# to the reactor, the `sentinel` logging context needs to be set so we don't
|
||||
# leak the current logging context and erroneously apply it to the next task the
|
||||
# reactor event loop picks up)
|
||||
with PreserveLoggingContext():
|
||||
run_command()
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
#
|
||||
# We also need to drop the logcontext before forking if we're daemonizing,
|
||||
# otherwise the cputime metrics get confused about the per-thread resource usage
|
||||
# appearing to go backwards.
|
||||
with PreserveLoggingContext():
|
||||
if daemonize:
|
||||
assert pid_file is not None
|
||||
|
||||
if daemonize:
|
||||
assert pid_file is not None
|
||||
if print_pidfile:
|
||||
print(pid_file)
|
||||
|
||||
if print_pidfile:
|
||||
print(pid_file)
|
||||
|
||||
daemonize_process(pid_file, logger)
|
||||
|
||||
run()
|
||||
daemonize_process(pid_file, logger)
|
||||
run()
|
||||
|
||||
|
||||
def quit_with_error(error_string: str) -> NoReturn:
|
||||
@@ -599,38 +601,18 @@ async def start(hs: "HomeServer") -> None:
|
||||
hs.get_datastores().main.db_pool.start_profiling()
|
||||
hs.get_pusherpool().start()
|
||||
|
||||
def log_shutdown() -> None:
|
||||
with LoggingContext("log_shutdown"):
|
||||
logger.info("Shutting down...")
|
||||
|
||||
# Log when we start the shut down process.
|
||||
hs.get_reactor().addSystemEventTrigger("before", "shutdown", log_shutdown)
|
||||
hs.get_reactor().addSystemEventTrigger(
|
||||
"before", "shutdown", logger.info, "Shutting down..."
|
||||
)
|
||||
|
||||
setup_sentry(hs)
|
||||
setup_sdnotify(hs)
|
||||
|
||||
# Register background tasks required by this server. This must be done
|
||||
# somewhat manually due to the background tasks not being registered
|
||||
# unless handlers are instantiated.
|
||||
#
|
||||
# While we could "start" these before the reactor runs, nothing will happen until
|
||||
# the reactor is running, so we may as well do it here in `start`.
|
||||
#
|
||||
# Additionally, this means we also start them after we daemonize and fork the
|
||||
# process which means we can avoid any potential problems with cputime metrics
|
||||
# getting confused about the per-thread resource usage appearing to go backwards
|
||||
# because we're comparing the resource usage (`rusage`) from the original process to
|
||||
# the forked process.
|
||||
# If background tasks are running on the main process or this is the worker in
|
||||
# charge of them, start collecting the phone home stats and shared usage metrics.
|
||||
if hs.config.worker.run_background_tasks:
|
||||
hs.start_background_tasks()
|
||||
|
||||
# TODO: This should be moved to same pattern we use for other background tasks:
|
||||
# Add to `REQUIRED_ON_BACKGROUND_TASK_STARTUP` and rely on
|
||||
# `start_background_tasks` to start it.
|
||||
await hs.get_common_usage_metrics_manager().setup()
|
||||
|
||||
# TODO: This feels like another pattern that should refactored as one of the
|
||||
# `REQUIRED_ON_BACKGROUND_TASK_STARTUP`
|
||||
start_phone_stats_home(hs)
|
||||
|
||||
# We now freeze all allocated objects in the hopes that (almost)
|
||||
|
||||
@@ -355,12 +355,7 @@ def start(config_options: List[str]) -> None:
|
||||
except Exception as e:
|
||||
handle_startup_exception(e)
|
||||
|
||||
async def start() -> None:
|
||||
# Re-establish log context now that we're back from the reactor
|
||||
with LoggingContext("start"):
|
||||
await _base.start(hs)
|
||||
|
||||
register_start(start)
|
||||
register_start(_base.start, hs)
|
||||
|
||||
# redirect stdio to the logs, if configured.
|
||||
if not hs.config.logging.no_redirect_stdio:
|
||||
|
||||
@@ -377,17 +377,15 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
|
||||
handle_startup_exception(e)
|
||||
|
||||
async def start() -> None:
|
||||
# Re-establish log context now that we're back from the reactor
|
||||
with LoggingContext("start"):
|
||||
# Load the OIDC provider metadatas, if OIDC is enabled.
|
||||
if hs.config.oidc.oidc_enabled:
|
||||
oidc = hs.get_oidc_handler()
|
||||
# Loading the provider metadata also ensures the provider config is valid.
|
||||
await oidc.load_metadata()
|
||||
# Load the OIDC provider metadatas, if OIDC is enabled.
|
||||
if hs.config.oidc.oidc_enabled:
|
||||
oidc = hs.get_oidc_handler()
|
||||
# Loading the provider metadata also ensures the provider config is valid.
|
||||
await oidc.load_metadata()
|
||||
|
||||
await _base.start(hs)
|
||||
await _base.start(hs)
|
||||
|
||||
hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
|
||||
hs.get_datastores().main.db_pool.updates.start_doing_background_updates()
|
||||
|
||||
register_start(start)
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
|
||||
import argparse
|
||||
import errno
|
||||
import importlib.resources as importlib_resources
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
@@ -47,6 +46,7 @@ from typing import (
|
||||
|
||||
import attr
|
||||
import jinja2
|
||||
import pkg_resources
|
||||
import yaml
|
||||
|
||||
from synapse.types import StrSequence
|
||||
@@ -174,8 +174,8 @@ class Config:
|
||||
self.root = root_config
|
||||
|
||||
# Get the path to the default Synapse template directory
|
||||
self.default_template_dir = str(
|
||||
importlib_resources.files("synapse").joinpath("res").joinpath("templates")
|
||||
self.default_template_dir = pkg_resources.resource_filename(
|
||||
"synapse", "res/templates"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -590,5 +590,5 @@ class ExperimentalConfig(Config):
|
||||
self.msc4293_enabled: bool = experimental.get("msc4293_enabled", False)
|
||||
|
||||
# MSC4306: Thread Subscriptions
|
||||
# (and MSC4308: Thread Subscriptions extension to Sliding Sync)
|
||||
# (and MSC4308: sliding sync extension for thread subscriptions)
|
||||
self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False)
|
||||
|
||||
@@ -18,13 +18,13 @@
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
import importlib.resources as importlib_resources
|
||||
import json
|
||||
import re
|
||||
from typing import Any, Dict, Iterable, List, Optional, Pattern
|
||||
from urllib import parse as urlparse
|
||||
|
||||
import attr
|
||||
import pkg_resources
|
||||
|
||||
from synapse.types import JsonDict, StrSequence
|
||||
|
||||
@@ -64,12 +64,7 @@ class OembedConfig(Config):
|
||||
"""
|
||||
# Whether to use the packaged providers.json file.
|
||||
if not oembed_config.get("disable_default_providers") or False:
|
||||
path = (
|
||||
importlib_resources.files("synapse")
|
||||
.joinpath("res")
|
||||
.joinpath("providers.json")
|
||||
)
|
||||
with path.open("r", encoding="utf-8") as s:
|
||||
with pkg_resources.resource_stream("synapse", "res/providers.json") as s:
|
||||
providers = json.load(s)
|
||||
|
||||
yield from self._parse_and_validate_provider(
|
||||
|
||||
@@ -120,19 +120,11 @@ def parse_thumbnail_requirements(
|
||||
|
||||
@attr.s(auto_attribs=True, slots=True, frozen=True)
|
||||
class MediaUploadLimit:
|
||||
"""
|
||||
Represents a limit on the amount of data a user can upload in a given time
|
||||
period.
|
||||
|
||||
These can be configured through the `media_upload_limits` [config option](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#media_upload_limits)
|
||||
or via the `get_media_upload_limits_for_user` module API [callback](https://element-hq.github.io/synapse/latest/modules/media_repository_callbacks.html#get_media_upload_limits_for_user).
|
||||
"""
|
||||
"""A limit on the amount of data a user can upload in a given time
|
||||
period."""
|
||||
|
||||
max_bytes: int
|
||||
"""The maximum number of bytes that can be uploaded in the given time period."""
|
||||
|
||||
time_period_ms: int
|
||||
"""The time period in milliseconds."""
|
||||
|
||||
|
||||
class ContentRepositoryConfig(Config):
|
||||
|
||||
@@ -306,12 +306,6 @@ class EventContext(UnpersistedEventContextBase):
|
||||
)
|
||||
|
||||
|
||||
EventPersistencePair = Tuple[EventBase, EventContext]
|
||||
"""
|
||||
The combination of an event to be persisted and its context.
|
||||
"""
|
||||
|
||||
|
||||
@attr.s(slots=True, auto_attribs=True)
|
||||
class UnpersistedEventContext(UnpersistedEventContextBase):
|
||||
"""
|
||||
@@ -369,7 +363,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
|
||||
room_id: str,
|
||||
last_known_state_group: int,
|
||||
datastore: "StateGroupDataStore",
|
||||
) -> List[EventPersistencePair]:
|
||||
) -> List[Tuple[EventBase, EventContext]]:
|
||||
"""
|
||||
Takes a list of events and their associated unpersisted contexts and persists
|
||||
the unpersisted contexts, returning a list of events and persisted contexts.
|
||||
|
||||
@@ -59,7 +59,7 @@ from synapse.api.errors import (
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
|
||||
from synapse.crypto.event_signing import compute_event_signature
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.snapshot import EventPersistencePair
|
||||
from synapse.events.snapshot import EventContext
|
||||
from synapse.federation.federation_base import (
|
||||
FederationBase,
|
||||
InvalidEventSignatureError,
|
||||
@@ -914,7 +914,7 @@ class FederationServer(FederationBase):
|
||||
|
||||
async def _on_send_membership_event(
|
||||
self, origin: str, content: JsonDict, membership_type: str, room_id: str
|
||||
) -> EventPersistencePair:
|
||||
) -> Tuple[EventBase, EventContext]:
|
||||
"""Handle an on_send_{join,leave,knock} request
|
||||
|
||||
Does some preliminary validation before passing the request on to the
|
||||
|
||||
@@ -37,7 +37,6 @@ Events are replicated via a separate events stream.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Dict,
|
||||
@@ -68,25 +67,6 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class QueueNames(str, Enum):
|
||||
PRESENCE_MAP = "presence_map"
|
||||
KEYED_EDU = "keyed_edu"
|
||||
KEYED_EDU_CHANGED = "keyed_edu_changed"
|
||||
EDUS = "edus"
|
||||
POS_TIME = "pos_time"
|
||||
PRESENCE_DESTINATIONS = "presence_destinations"
|
||||
|
||||
|
||||
queue_name_to_gauge_map: Dict[QueueNames, LaterGauge] = {}
|
||||
|
||||
for queue_name in QueueNames:
|
||||
queue_name_to_gauge_map[queue_name] = LaterGauge(
|
||||
name=f"synapse_federation_send_queue_{queue_name.value}_size",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
|
||||
class FederationRemoteSendQueue(AbstractFederationSender):
|
||||
"""A drop in replacement for FederationSender"""
|
||||
|
||||
@@ -131,16 +111,23 @@ class FederationRemoteSendQueue(AbstractFederationSender):
|
||||
# we make a new function, so we need to make a new function so the inner
|
||||
# lambda binds to the queue rather than to the name of the queue which
|
||||
# changes. ARGH.
|
||||
def register(queue_name: QueueNames, queue: Sized) -> None:
|
||||
queue_name_to_gauge_map[queue_name].register_hook(
|
||||
homeserver_instance_id=hs.get_instance_id(),
|
||||
hook=lambda: {(self.server_name,): len(queue)},
|
||||
def register(name: str, queue: Sized) -> None:
|
||||
LaterGauge(
|
||||
name="synapse_federation_send_queue_%s_size" % (queue_name,),
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
caller=lambda: {(self.server_name,): len(queue)},
|
||||
)
|
||||
|
||||
for queue_name in QueueNames:
|
||||
queue = getattr(self, queue_name.value)
|
||||
assert isinstance(queue, Sized)
|
||||
register(queue_name, queue=queue)
|
||||
for queue_name in [
|
||||
"presence_map",
|
||||
"keyed_edu",
|
||||
"keyed_edu_changed",
|
||||
"edus",
|
||||
"pos_time",
|
||||
"presence_destinations",
|
||||
]:
|
||||
register(queue_name, getattr(self, queue_name))
|
||||
|
||||
self.clock.looping_call(self._clear_queue, 30 * 1000)
|
||||
|
||||
|
||||
@@ -150,7 +150,6 @@ from prometheus_client import Counter
|
||||
from twisted.internet import defer
|
||||
|
||||
import synapse.metrics
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.sender.per_destination_queue import (
|
||||
@@ -200,24 +199,6 @@ sent_pdus_destination_dist_total = Counter(
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
transaction_queue_pending_destinations_gauge = LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_destinations",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
transaction_queue_pending_pdus_gauge = LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_pdus",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
transaction_queue_pending_edus_gauge = LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_edus",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
# Time (in s) to wait before trying to wake up destinations that have
|
||||
# catch-up outstanding.
|
||||
# Please note that rate limiting still applies, so while the loop is
|
||||
@@ -417,9 +398,11 @@ class FederationSender(AbstractFederationSender):
|
||||
# map from destination to PerDestinationQueue
|
||||
self._per_destination_queues: Dict[str, PerDestinationQueue] = {}
|
||||
|
||||
transaction_queue_pending_destinations_gauge.register_hook(
|
||||
homeserver_instance_id=hs.get_instance_id(),
|
||||
hook=lambda: {
|
||||
LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_destinations",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
caller=lambda: {
|
||||
(self.server_name,): sum(
|
||||
1
|
||||
for d in self._per_destination_queues.values()
|
||||
@@ -427,17 +410,22 @@ class FederationSender(AbstractFederationSender):
|
||||
)
|
||||
},
|
||||
)
|
||||
transaction_queue_pending_pdus_gauge.register_hook(
|
||||
homeserver_instance_id=hs.get_instance_id(),
|
||||
hook=lambda: {
|
||||
|
||||
LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_pdus",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
caller=lambda: {
|
||||
(self.server_name,): sum(
|
||||
d.pending_pdu_count() for d in self._per_destination_queues.values()
|
||||
)
|
||||
},
|
||||
)
|
||||
transaction_queue_pending_edus_gauge.register_hook(
|
||||
homeserver_instance_id=hs.get_instance_id(),
|
||||
hook=lambda: {
|
||||
LaterGauge(
|
||||
name="synapse_federation_transaction_queue_pending_edus",
|
||||
desc="",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
caller=lambda: {
|
||||
(self.server_name,): sum(
|
||||
d.pending_edu_count() for d in self._per_destination_queues.values()
|
||||
)
|
||||
@@ -656,31 +644,6 @@ class FederationSender(AbstractFederationSender):
|
||||
)
|
||||
return
|
||||
|
||||
# If we've rescinded an invite then we want to tell the
|
||||
# other server.
|
||||
if (
|
||||
event.type == EventTypes.Member
|
||||
and event.membership == Membership.LEAVE
|
||||
and event.sender != event.state_key
|
||||
):
|
||||
# We check if this leave event is rescinding an invite
|
||||
# by looking if there is an invite event for the user in
|
||||
# the auth events. It could otherwise be a kick or
|
||||
# unban, which we don't want to send (if the user wasn't
|
||||
# already in the room).
|
||||
auth_events = await self.store.get_events_as_list(
|
||||
event.auth_event_ids()
|
||||
)
|
||||
for auth_event in auth_events:
|
||||
if (
|
||||
auth_event.type == EventTypes.Member
|
||||
and auth_event.state_key == event.state_key
|
||||
and auth_event.membership == Membership.INVITE
|
||||
):
|
||||
destinations = set(destinations)
|
||||
destinations.add(get_domain_from_id(event.state_key))
|
||||
break
|
||||
|
||||
sharded_destinations = {
|
||||
d
|
||||
for d in destinations
|
||||
|
||||
@@ -26,7 +26,7 @@ from synapse.api.constants import EduTypes
|
||||
from synapse.api.errors import HttpResponseException
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.persistence import TransactionActions
|
||||
from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus
|
||||
from synapse.federation.units import Edu, Transaction
|
||||
from synapse.logging.opentracing import (
|
||||
extract_text_map,
|
||||
set_tag,
|
||||
@@ -119,7 +119,7 @@ class TransactionManager:
|
||||
transaction_id=txn_id,
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
pdus=serialize_and_filter_pdus(pdus),
|
||||
pdus=[p.get_pdu_json() for p in pdus],
|
||||
edus=[edu.get_dict() for edu in edus],
|
||||
)
|
||||
|
||||
|
||||
@@ -135,7 +135,7 @@ class PublicRoomList(BaseFederationServlet):
|
||||
if not self.allow_access:
|
||||
raise FederationDeniedError(origin)
|
||||
|
||||
limit: Optional[int] = parse_integer_from_args(query, "limit", 0)
|
||||
limit = parse_integer_from_args(query, "limit", 0)
|
||||
since_token = parse_string_from_args(query, "since", None)
|
||||
include_all_networks = parse_boolean_from_args(
|
||||
query, "include_all_networks", default=False
|
||||
|
||||
@@ -215,9 +215,9 @@ class DelayedEventsHandler:
|
||||
"Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
|
||||
)
|
||||
|
||||
event = await self._store.get_event(delta.event_id, allow_none=True)
|
||||
if not event:
|
||||
continue
|
||||
event = await self._store.get_event(
|
||||
delta.event_id, check_room_id=delta.room_id
|
||||
)
|
||||
sender = UserID.from_string(event.sender)
|
||||
|
||||
next_send_ts = await self._store.cancel_delayed_state_events(
|
||||
|
||||
@@ -20,22 +20,12 @@
|
||||
#
|
||||
#
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple
|
||||
|
||||
import attr
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import VerifyKey, decode_verify_key_bytes
|
||||
from signedjson.sign import SignatureVerifyException, verify_signed_json
|
||||
from typing_extensions import TypeAlias
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from twisted.internet import defer
|
||||
@@ -67,50 +57,10 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True, auto_attribs=True)
|
||||
class DeviceKeys:
|
||||
algorithms: List[str]
|
||||
"""The encryption algorithms supported by this device."""
|
||||
|
||||
device_id: str
|
||||
"""The ID of the device these keys belong to. Must match the device ID used when logging in."""
|
||||
|
||||
keys: Mapping[str, str]
|
||||
"""
|
||||
Public identity keys. The names of the properties should be in the
|
||||
format `<algorithm>:<device_id>`. The keys themselves should be encoded as
|
||||
specified by the key algorithm.
|
||||
"""
|
||||
|
||||
signatures: Mapping[UserID, Mapping[str, str]]
|
||||
"""Signatures for the device key object. A map from user ID, to a map from "<algorithm>:<device_id>" to the signature."""
|
||||
|
||||
user_id: UserID
|
||||
"""The ID of the user the device belongs to. Must match the user ID used when logging in."""
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True, auto_attribs=True)
|
||||
class KeyObject:
|
||||
key: str
|
||||
"""The key, encoded using unpadded base64."""
|
||||
|
||||
signatures: Mapping[UserID, Mapping[str, str]]
|
||||
"""Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself.
|
||||
|
||||
See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details
|
||||
"""
|
||||
|
||||
fallback: bool = False
|
||||
"""Whether this is a fallback key."""
|
||||
|
||||
|
||||
FallbackKeys: TypeAlias = Mapping[str, Union[str, KeyObject]]
|
||||
OneTimeKeys: TypeAlias = Mapping[str, Union[str, KeyObject]]
|
||||
|
||||
|
||||
class E2eKeysHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.config = hs.config
|
||||
@@ -884,12 +834,7 @@ class E2eKeysHandler:
|
||||
|
||||
@tag_args
|
||||
async def upload_keys_for_user(
|
||||
self,
|
||||
user_id: str,
|
||||
device_id: str,
|
||||
device_keys: Optional[DeviceKeys],
|
||||
fallback_keys: Optional[FallbackKeys],
|
||||
one_time_keys: Optional[OneTimeKeys],
|
||||
self, user_id: str, device_id: str, keys: JsonDict
|
||||
) -> JsonDict:
|
||||
"""
|
||||
Args:
|
||||
@@ -903,27 +848,16 @@ class E2eKeysHandler:
|
||||
"""
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
# TODO: Validate the JSON to make sure it has the right keys.
|
||||
device_keys = keys.get("device_keys", None)
|
||||
if device_keys:
|
||||
# Validate that user_id and device_id match the requesting user
|
||||
if (
|
||||
device_keys.user_id.to_string() == user_id
|
||||
and device_keys.device_id == device_id
|
||||
):
|
||||
await self.upload_device_keys_for_user(
|
||||
user_id,
|
||||
device_id,
|
||||
device_keys,
|
||||
)
|
||||
else:
|
||||
log_kv(
|
||||
{
|
||||
"message": "Not updating device_keys for user, user_id or device_id mismatch",
|
||||
"user_id": user_id,
|
||||
}
|
||||
)
|
||||
else:
|
||||
log_kv({"message": "Did not update device_keys", "reason": "not a dict"})
|
||||
await self.upload_device_keys_for_user(
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
keys={"device_keys": device_keys},
|
||||
)
|
||||
|
||||
one_time_keys = keys.get("one_time_keys", None)
|
||||
if one_time_keys:
|
||||
log_kv(
|
||||
{
|
||||
@@ -935,14 +869,14 @@ class E2eKeysHandler:
|
||||
await self._upload_one_time_keys_for_user(
|
||||
user_id, device_id, time_now, one_time_keys
|
||||
)
|
||||
elif one_time_keys:
|
||||
log_kv({"message": "Did not update device_keys", "reason": "not a dict"})
|
||||
else:
|
||||
log_kv(
|
||||
{"message": "Did not update one_time_keys", "reason": "no keys given"}
|
||||
)
|
||||
|
||||
if fallback_keys:
|
||||
fallback_keys = keys.get("fallback_keys") or keys.get(
|
||||
"org.matrix.msc2732.fallback_keys"
|
||||
)
|
||||
if fallback_keys and isinstance(fallback_keys, dict):
|
||||
log_kv(
|
||||
{
|
||||
"message": "Updating fallback_keys for device.",
|
||||
@@ -951,6 +885,8 @@ class E2eKeysHandler:
|
||||
}
|
||||
)
|
||||
await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys)
|
||||
elif fallback_keys:
|
||||
log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"})
|
||||
else:
|
||||
log_kv(
|
||||
{"message": "Did not update fallback_keys", "reason": "no keys given"}
|
||||
@@ -963,10 +899,7 @@ class E2eKeysHandler:
|
||||
|
||||
@tag_args
|
||||
async def upload_device_keys_for_user(
|
||||
self,
|
||||
user_id: str,
|
||||
device_id: str,
|
||||
device_keys: DeviceKeys,
|
||||
self, user_id: str, device_id: str, keys: JsonDict
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
@@ -977,6 +910,7 @@ class E2eKeysHandler:
|
||||
"""
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
device_keys = keys["device_keys"]
|
||||
logger.info(
|
||||
"Updating device_keys for device %r for user %s at %d",
|
||||
device_id,
|
||||
@@ -1006,11 +940,7 @@ class E2eKeysHandler:
|
||||
await self.device_handler.check_device_registered(user_id, device_id)
|
||||
|
||||
async def _upload_one_time_keys_for_user(
|
||||
self,
|
||||
user_id: str,
|
||||
device_id: str,
|
||||
time_now: int,
|
||||
one_time_keys: OneTimeKeys,
|
||||
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
|
||||
) -> None:
|
||||
# We take out a lock so that we don't have to worry about a client
|
||||
# sending duplicate requests.
|
||||
@@ -1797,20 +1727,20 @@ def _exception_to_failure(e: Exception) -> JsonDict:
|
||||
return {"status": 503, "message": str(e)}
|
||||
|
||||
|
||||
def _one_time_keys_match(old_key_json: str, new_key: Union[str, KeyObject]) -> bool:
|
||||
def _one_time_keys_match(old_key_json: str, new_key: JsonDict) -> bool:
|
||||
old_key = json_decoder.decode(old_key_json)
|
||||
|
||||
# if either is a string rather than an object, they must match exactly
|
||||
if isinstance(old_key, str) or isinstance(new_key, str):
|
||||
if not isinstance(old_key, dict) or not isinstance(new_key, dict):
|
||||
return old_key == new_key
|
||||
|
||||
# new_key must be a `KeyObject`
|
||||
# otherwise, we strip off the 'signatures' if any, because it's legitimate
|
||||
# for different upload attempts to have different signatures.
|
||||
old_key.pop("signatures", None)
|
||||
new_key_copy = dict(new_key)
|
||||
new_key_copy.pop("signatures", None)
|
||||
|
||||
# Otherwise, check whether the embedded keys match.
|
||||
#
|
||||
# We ignore signatures, because it's legitimate for different upload
|
||||
# attempts to have different signatures.
|
||||
return old_key["key"] == new_key.key
|
||||
return old_key == new_key_copy
|
||||
|
||||
|
||||
@attr.s(slots=True, auto_attribs=True)
|
||||
|
||||
@@ -66,11 +66,7 @@ from synapse.event_auth import (
|
||||
validate_event_for_room_version,
|
||||
)
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.snapshot import (
|
||||
EventContext,
|
||||
EventPersistencePair,
|
||||
UnpersistedEventContextBase,
|
||||
)
|
||||
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
|
||||
from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
|
||||
from synapse.logging.context import nested_logging_context
|
||||
from synapse.logging.opentracing import (
|
||||
@@ -248,10 +244,9 @@ class FederationEventHandler:
|
||||
self.room_queues[room_id].append((pdu, origin))
|
||||
return
|
||||
|
||||
# If we're not in the room just ditch the event entirely (and not
|
||||
# invited). This is probably an old server that has come back and thinks
|
||||
# we're still in the room (or we've been rejoined to the room by a state
|
||||
# reset).
|
||||
# If we're not in the room just ditch the event entirely. This is
|
||||
# probably an old server that has come back and thinks we're still in
|
||||
# the room (or we've been rejoined to the room by a state reset).
|
||||
#
|
||||
# Note that if we were never in the room then we would have already
|
||||
# dropped the event, since we wouldn't know the room version.
|
||||
@@ -259,43 +254,6 @@ class FederationEventHandler:
|
||||
room_id, self.server_name
|
||||
)
|
||||
if not is_in_room:
|
||||
# Check if this is a leave event rescinding an invite
|
||||
if (
|
||||
pdu.type == EventTypes.Member
|
||||
and pdu.membership == Membership.LEAVE
|
||||
and pdu.state_key != pdu.sender
|
||||
and self._is_mine_id(pdu.state_key)
|
||||
):
|
||||
(
|
||||
membership,
|
||||
membership_event_id,
|
||||
) = await self._store.get_local_current_membership_for_user_in_room(
|
||||
pdu.state_key, pdu.room_id
|
||||
)
|
||||
if (
|
||||
membership == Membership.INVITE
|
||||
and membership_event_id
|
||||
and membership_event_id
|
||||
in pdu.auth_event_ids() # The invite should be in the auth events of the rescission.
|
||||
):
|
||||
invite_event = await self._store.get_event(
|
||||
membership_event_id, allow_none=True
|
||||
)
|
||||
|
||||
# We cannot fully auth the rescission event, but we can
|
||||
# check if the sender of the leave event is the same as the
|
||||
# invite.
|
||||
#
|
||||
# Technically, a room admin could rescind the invite, but we
|
||||
# have no way of knowing who is and isn't a room admin.
|
||||
if invite_event and pdu.sender == invite_event.sender:
|
||||
# Handle the rescission event
|
||||
pdu.internal_metadata.outlier = True
|
||||
pdu.internal_metadata.out_of_band_membership = True
|
||||
context = EventContext.for_outlier(self._storage_controllers)
|
||||
await self.persist_events_and_notify(room_id, [(pdu, context)])
|
||||
return
|
||||
|
||||
logger.info(
|
||||
"Ignoring PDU from %s as we're not in the room",
|
||||
origin,
|
||||
@@ -383,7 +341,7 @@ class FederationEventHandler:
|
||||
|
||||
async def on_send_membership_event(
|
||||
self, origin: str, event: EventBase
|
||||
) -> EventPersistencePair:
|
||||
) -> Tuple[EventBase, EventContext]:
|
||||
"""
|
||||
We have received a join/leave/knock event for a room via send_join/leave/knock.
|
||||
|
||||
@@ -1754,7 +1712,7 @@ class FederationEventHandler:
|
||||
)
|
||||
auth_map.update(persisted_events)
|
||||
|
||||
events_and_contexts_to_persist: List[EventPersistencePair] = []
|
||||
events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = []
|
||||
|
||||
async def prep(event: EventBase) -> None:
|
||||
with nested_logging_context(suffix=event.event_id):
|
||||
@@ -2267,7 +2225,7 @@ class FederationEventHandler:
|
||||
async def persist_events_and_notify(
|
||||
self,
|
||||
room_id: str,
|
||||
event_and_contexts: Sequence[EventPersistencePair],
|
||||
event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
|
||||
backfilled: bool = False,
|
||||
) -> int:
|
||||
"""Persists events and tells the notifier/pushers about them, if
|
||||
|
||||
@@ -57,7 +57,6 @@ from synapse.events import EventBase, relation_from_event
|
||||
from synapse.events.builder import EventBuilder
|
||||
from synapse.events.snapshot import (
|
||||
EventContext,
|
||||
EventPersistencePair,
|
||||
UnpersistedEventContext,
|
||||
UnpersistedEventContextBase,
|
||||
)
|
||||
@@ -1440,7 +1439,7 @@ class EventCreationHandler:
|
||||
async def handle_new_client_event(
|
||||
self,
|
||||
requester: Requester,
|
||||
events_and_context: List[EventPersistencePair],
|
||||
events_and_context: List[Tuple[EventBase, EventContext]],
|
||||
ratelimit: bool = True,
|
||||
extra_users: Optional[List[UserID]] = None,
|
||||
ignore_shadow_ban: bool = False,
|
||||
@@ -1652,7 +1651,7 @@ class EventCreationHandler:
|
||||
async def _persist_events(
|
||||
self,
|
||||
requester: Requester,
|
||||
events_and_context: List[EventPersistencePair],
|
||||
events_and_context: List[Tuple[EventBase, EventContext]],
|
||||
ratelimit: bool = True,
|
||||
extra_users: Optional[List[UserID]] = None,
|
||||
) -> EventBase:
|
||||
@@ -1738,7 +1737,7 @@ class EventCreationHandler:
|
||||
raise
|
||||
|
||||
async def cache_joined_hosts_for_events(
|
||||
self, events_and_context: List[EventPersistencePair]
|
||||
self, events_and_context: List[Tuple[EventBase, EventContext]]
|
||||
) -> None:
|
||||
"""Precalculate the joined hosts at each of the given events, when using Redis, so that
|
||||
external federation senders don't have to recalculate it themselves.
|
||||
@@ -1844,7 +1843,7 @@ class EventCreationHandler:
|
||||
async def persist_and_notify_client_events(
|
||||
self,
|
||||
requester: Requester,
|
||||
events_and_context: List[EventPersistencePair],
|
||||
events_and_context: List[Tuple[EventBase, EventContext]],
|
||||
ratelimit: bool = True,
|
||||
extra_users: Optional[List[UserID]] = None,
|
||||
) -> EventBase:
|
||||
|
||||
@@ -173,18 +173,6 @@ state_transition_counter = Counter(
labelnames=["locality", "from", "to", SERVER_NAME_LABEL],
)

presence_user_to_current_state_size_gauge = LaterGauge(
name="synapse_handlers_presence_user_to_current_state_size",
desc="",
labelnames=[SERVER_NAME_LABEL],
)

presence_wheel_timer_size_gauge = LaterGauge(
name="synapse_handlers_presence_wheel_timer_size",
desc="",
labelnames=[SERVER_NAME_LABEL],
)

# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
@@ -791,9 +779,11 @@ class PresenceHandler(BasePresenceHandler):
EduTypes.PRESENCE, self.incoming_presence
)

presence_user_to_current_state_size_gauge.register_hook(
homeserver_instance_id=hs.get_instance_id(),
hook=lambda: {(self.server_name,): len(self.user_to_current_state)},
LaterGauge(
name="synapse_handlers_presence_user_to_current_state_size",
desc="",
labelnames=[SERVER_NAME_LABEL],
caller=lambda: {(self.server_name,): len(self.user_to_current_state)},
)

# The per-device presence state, maps user to devices to per-device presence state.
@@ -892,9 +882,11 @@ class PresenceHandler(BasePresenceHandler):
60 * 1000,
)

presence_wheel_timer_size_gauge.register_hook(
homeserver_instance_id=hs.get_instance_id(),
hook=lambda: {(self.server_name,): len(self.wheel_timer)},
LaterGauge(
name="synapse_handlers_presence_wheel_timer_size",
desc="",
labelnames=[SERVER_NAME_LABEL],
caller=lambda: {(self.server_name,): len(self.wheel_timer)},
)

# Used to handle sending of presence to newly joined users/servers
@@ -211,7 +211,7 @@ class SlidingSyncHandler:

Args:
sync_config: Sync configuration
to_token: The latest point in the stream to sync up to.
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request.
"""
@@ -27,7 +27,7 @@ from typing import (
|
||||
cast,
|
||||
)
|
||||
|
||||
from typing_extensions import TypeAlias, assert_never
|
||||
from typing_extensions import assert_never
|
||||
|
||||
from synapse.api.constants import AccountDataTypes, EduTypes
|
||||
from synapse.handlers.receipts import ReceiptEventSource
|
||||
@@ -40,7 +40,6 @@ from synapse.types import (
|
||||
SlidingSyncStreamToken,
|
||||
StrCollection,
|
||||
StreamToken,
|
||||
ThreadSubscriptionsToken,
|
||||
)
|
||||
from synapse.types.handlers.sliding_sync import (
|
||||
HaveSentRoomFlag,
|
||||
@@ -55,13 +54,6 @@ from synapse.util.async_helpers import (
|
||||
gather_optional_coroutines,
|
||||
)
|
||||
|
||||
_ThreadSubscription: TypeAlias = (
|
||||
SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadSubscription
|
||||
)
|
||||
_ThreadUnsubscription: TypeAlias = (
|
||||
SlidingSyncResult.Extensions.ThreadSubscriptionsExtension.ThreadUnsubscription
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
@@ -76,7 +68,6 @@ class SlidingSyncExtensionHandler:
|
||||
self.event_sources = hs.get_event_sources()
|
||||
self.device_handler = hs.get_device_handler()
|
||||
self.push_rules_handler = hs.get_push_rules_handler()
|
||||
self._enable_thread_subscriptions = hs.config.experimental.msc4306_enabled
|
||||
|
||||
@trace
|
||||
async def get_extensions_response(
|
||||
@@ -102,7 +93,7 @@ class SlidingSyncExtensionHandler:
|
||||
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
Sliding Sync response.
|
||||
to_token: The latest point in the stream to sync up to.
|
||||
to_token: The point in the stream to sync up to.
|
||||
from_token: The point in the stream to sync from.
|
||||
"""
|
||||
|
||||
@@ -165,32 +156,18 @@ class SlidingSyncExtensionHandler:
|
||||
from_token=from_token,
|
||||
)
|
||||
|
||||
thread_subs_coro = None
|
||||
if (
|
||||
sync_config.extensions.thread_subscriptions is not None
|
||||
and self._enable_thread_subscriptions
|
||||
):
|
||||
thread_subs_coro = self.get_thread_subscriptions_extension_response(
|
||||
sync_config=sync_config,
|
||||
thread_subscriptions_request=sync_config.extensions.thread_subscriptions,
|
||||
to_token=to_token,
|
||||
from_token=from_token,
|
||||
)
|
||||
|
||||
(
|
||||
to_device_response,
|
||||
e2ee_response,
|
||||
account_data_response,
|
||||
receipts_response,
|
||||
typing_response,
|
||||
thread_subs_response,
|
||||
) = await gather_optional_coroutines(
|
||||
to_device_coro,
|
||||
e2ee_coro,
|
||||
account_data_coro,
|
||||
receipts_coro,
|
||||
typing_coro,
|
||||
thread_subs_coro,
|
||||
)
|
||||
|
||||
return SlidingSyncResult.Extensions(
|
||||
@@ -199,7 +176,6 @@ class SlidingSyncExtensionHandler:
|
||||
account_data=account_data_response,
|
||||
receipts=receipts_response,
|
||||
typing=typing_response,
|
||||
thread_subscriptions=thread_subs_response,
|
||||
)
|
||||
|
||||
def find_relevant_room_ids_for_extension(
|
||||
@@ -901,72 +877,3 @@ class SlidingSyncExtensionHandler:
return SlidingSyncResult.Extensions.TypingExtension(
room_id_to_typing_map=room_id_to_typing_map,
)

async def get_thread_subscriptions_extension_response(
self,
sync_config: SlidingSyncConfig,
thread_subscriptions_request: SlidingSyncConfig.Extensions.ThreadSubscriptionsExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> Optional[SlidingSyncResult.Extensions.ThreadSubscriptionsExtension]:
"""Handle Thread Subscriptions extension (MSC4308)

Args:
sync_config: Sync configuration
thread_subscriptions_request: The thread_subscriptions extension from the request
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.

Returns:
the response (None if empty or thread subscriptions are disabled)
"""
if not thread_subscriptions_request.enabled:
return None

limit = thread_subscriptions_request.limit

if from_token:
from_stream_id = from_token.stream_token.thread_subscriptions_key
else:
from_stream_id = StreamToken.START.thread_subscriptions_key

to_stream_id = to_token.thread_subscriptions_key

updates = await self.store.get_latest_updated_thread_subscriptions_for_user(
user_id=sync_config.user.to_string(),
from_id=from_stream_id,
to_id=to_stream_id,
limit=limit,
)

if len(updates) == 0:
return None

subscribed_threads: Dict[str, Dict[str, _ThreadSubscription]] = {}
unsubscribed_threads: Dict[str, Dict[str, _ThreadUnsubscription]] = {}
for stream_id, room_id, thread_root_id, subscribed, automatic in updates:
if subscribed:
subscribed_threads.setdefault(room_id, {})[thread_root_id] = (
_ThreadSubscription(
automatic=automatic,
bump_stamp=stream_id,
)
)
else:
unsubscribed_threads.setdefault(room_id, {})[thread_root_id] = (
_ThreadUnsubscription(bump_stamp=stream_id)
)

prev_batch = None
if len(updates) == limit:
# Tell the client about a potential gap where there may be more
# thread subscriptions for it to backpaginate.
# We subtract one because the 'later in the stream' bound is inclusive,
# and we already saw the element at index 0.
prev_batch = ThreadSubscriptionsToken(updates[0][0] - 1)

return SlidingSyncResult.Extensions.ThreadSubscriptionsExtension(
subscribed=subscribed_threads,
unsubscribed=unsubscribed_threads,
prev_batch=prev_batch,
)
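A rough worked example of the `prev_batch` arithmetic above, assuming the store returns the newest `limit` rows ordered oldest-first within that window (tuple fields and values are invented for illustration):

```python
# Suppose the user has thread-subscription updates at stream ids 5..10 and
# the request uses limit=3: the query returns the newest three updates.
updates = [
    (8, "!room:example.org", "$thread_a", True, False),
    (9, "!room:example.org", "$thread_b", True, True),
    (10, "!room:example.org", "$thread_a", False, False),
]
limit = 3

if len(updates) == limit:
    # Updates with stream ids <= 7 may still exist.  The bound is inclusive
    # and id 8 was already delivered to the client, hence the "- 1".
    prev_batch_stream_id = updates[0][0] - 1  # == 7
```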
@@ -20,6 +20,7 @@
|
||||
#
|
||||
import itertools
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
AbstractSet,
|
||||
@@ -27,11 +28,14 @@ from typing import (
|
||||
Dict,
|
||||
FrozenSet,
|
||||
List,
|
||||
Literal,
|
||||
Mapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
|
||||
import attr
|
||||
@@ -116,6 +120,25 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
SyncRequestKey = Tuple[Any, ...]


class SyncVersion(Enum):
"""
Enum for specifying the version of sync request. This is used to key which type of
sync response that we are generating.

This is different than the `sync_type` you might see used in other code below; which
specifies the sub-type sync request (e.g. initial_sync, full_state_sync,
incremental_sync) and is really only relevant for the `/sync` v2 endpoint.
"""

# These string values are semantically significant because they are used in the
# metrics

# Traditional `/sync` endpoint
SYNC_V2 = "sync_v2"
# Part of MSC3575 Sliding Sync
E2EE_SYNC = "e2ee_sync"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
user: UserID
@@ -285,6 +308,26 @@ class SyncResult:
)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class E2eeSyncResult:
"""
Attributes:
next_batch: Token for the next sync
to_device: List of direct messages for the device.
device_lists: List of user_ids whose devices have changed
device_one_time_keys_count: Dict of algorithm to count for one time keys
for this device
device_unused_fallback_key_types: List of key types that have an unused fallback
key
"""

next_batch: StreamToken
to_device: List[JsonDict]
device_lists: DeviceListUpdates
device_one_time_keys_count: JsonMapping
device_unused_fallback_key_types: List[str]
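The overloads added further down in this file rely on `typing.Literal` so that callers get a precise return type for each `SyncVersion`; a minimal sketch of that pattern, using a hypothetical `make_result` helper, looks like this:

```python
from typing import Literal, Union, overload


@overload
async def make_result(version: Literal[SyncVersion.SYNC_V2]) -> SyncResult: ...
@overload
async def make_result(version: Literal[SyncVersion.E2EE_SYNC]) -> E2eeSyncResult: ...
@overload
async def make_result(version: SyncVersion) -> Union[SyncResult, E2eeSyncResult]: ...


async def make_result(version: SyncVersion) -> Union[SyncResult, E2eeSyncResult]:
    # Dispatch on `version`, mirroring what wait_for_sync_for_user does below.
    ...
```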
class SyncHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.server_name = hs.hostname
|
||||
@@ -330,15 +373,52 @@ class SyncHandler:
|
||||
|
||||
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> SyncResult:
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def wait_for_sync_for_user(
|
||||
self,
|
||||
requester: Requester,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
request_key: SyncRequestKey,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
timeout: int = 0,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""Get the sync for a client if we have new data for it now. Otherwise
|
||||
wait for new data to arrive on the server. If the timeout expires, then
|
||||
return an empty sync result.
|
||||
@@ -353,7 +433,8 @@ class SyncHandler:
|
||||
full_state: Whether to return the full state for each room.
|
||||
|
||||
Returns:
|
||||
returns a full `SyncResult`.
|
||||
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
|
||||
When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
|
||||
"""
|
||||
# If the user is not part of the mau group, then check that limits have
|
||||
# not been exceeded (if not part of the group by this point, almost certain
|
||||
@@ -365,6 +446,7 @@ class SyncHandler:
|
||||
request_key,
|
||||
self._wait_for_sync_for_user,
|
||||
sync_config,
|
||||
sync_version,
|
||||
since_token,
|
||||
timeout,
|
||||
full_state,
|
||||
@@ -373,14 +455,48 @@ class SyncHandler:
|
||||
logger.debug("Returning sync response for %s", user_id)
|
||||
return res
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> SyncResult:
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def _wait_for_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken],
|
||||
timeout: int,
|
||||
full_state: bool,
|
||||
cache_context: ResponseCacheContext[SyncRequestKey],
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""The start of the machinery that produces a /sync response.
|
||||
|
||||
See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.
|
||||
@@ -401,7 +517,7 @@ class SyncHandler:
|
||||
else:
|
||||
sync_type = "incremental_sync"
|
||||
|
||||
sync_label = f"sync_v2:{sync_type}"
|
||||
sync_label = f"{sync_version}:{sync_type}"
|
||||
|
||||
context = current_context()
|
||||
if context:
|
||||
@@ -462,15 +578,19 @@ class SyncHandler:
|
||||
if timeout == 0 or since_token is None or full_state:
|
||||
# we are going to return immediately, so don't bother calling
|
||||
# notifier.wait_for_events.
|
||||
result = await self.current_sync_for_user(
|
||||
sync_config, since_token, full_state=full_state
|
||||
result: Union[
|
||||
SyncResult, E2eeSyncResult
|
||||
] = await self.current_sync_for_user(
|
||||
sync_config, sync_version, since_token, full_state=full_state
|
||||
)
|
||||
else:
|
||||
# Otherwise, we wait for something to happen and report it to the user.
|
||||
async def current_sync_callback(
|
||||
before_token: StreamToken, after_token: StreamToken
|
||||
) -> SyncResult:
|
||||
return await self.current_sync_for_user(sync_config, since_token)
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
return await self.current_sync_for_user(
|
||||
sync_config, sync_version, since_token
|
||||
)
|
||||
|
||||
result = await self.notifier.wait_for_events(
|
||||
sync_config.user.to_string(),
|
||||
@@ -503,15 +623,43 @@ class SyncHandler:
|
||||
|
||||
return result
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.SYNC_V2],
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> SyncResult:
|
||||
) -> SyncResult: ...
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: Literal[SyncVersion.E2EE_SYNC],
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> E2eeSyncResult: ...
|
||||
|
||||
@overload
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]: ...
|
||||
|
||||
async def current_sync_for_user(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
sync_version: SyncVersion,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
full_state: bool = False,
|
||||
) -> Union[SyncResult, E2eeSyncResult]:
|
||||
"""
|
||||
Generates the response body of a sync result, represented as a
|
||||
`SyncResult`.
|
||||
`SyncResult`/`E2eeSyncResult`.
|
||||
|
||||
This is a wrapper around `generate_sync_result` which starts an open tracing
|
||||
span to track the sync. See `generate_sync_result` for the next part of your
|
||||
@@ -524,15 +672,28 @@ class SyncHandler:
full_state: Whether to return the full state for each room.

Returns:
returns a full `SyncResult`.
When `SyncVersion.SYNC_V2`, returns a full `SyncResult`.
When `SyncVersion.E2EE_SYNC`, returns a `E2eeSyncResult`.
"""
with start_active_span("sync.current_sync_for_user"):
log_kv({"since_token": since_token})

# Go through the `/sync` v2 path
sync_result = await self.generate_sync_result(
sync_config, since_token, full_state
)
if sync_version == SyncVersion.SYNC_V2:
sync_result: Union[
SyncResult, E2eeSyncResult
] = await self.generate_sync_result(
sync_config, since_token, full_state
)
# Go through the MSC3575 Sliding Sync `/sync/e2ee` path
elif sync_version == SyncVersion.E2EE_SYNC:
sync_result = await self.generate_e2ee_sync_result(
sync_config, since_token
)
else:
raise Exception(
f"Unknown sync_version (this is a Synapse problem): {sync_version}"
)

set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
return sync_result
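A hypothetical call site (all variable names are placeholders) showing how the new `sync_version` argument threads through the handler:

```python
result = await sync_handler.wait_for_sync_for_user(
    requester,
    sync_config,
    SyncVersion.E2EE_SYNC,
    request_key,
    since_token=since_token,
    timeout=timeout_ms,
)
# `result` is typed as E2eeSyncResult thanks to the Literal overloads above.
```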
@@ -1807,6 +1968,102 @@ class SyncHandler:
|
||||
next_batch=sync_result_builder.now_token,
|
||||
)
|
||||
|
||||
async def generate_e2ee_sync_result(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
since_token: Optional[StreamToken] = None,
|
||||
) -> E2eeSyncResult:
|
||||
"""
|
||||
Generates the response body of a MSC3575 Sliding Sync `/sync/e2ee` result.
|
||||
|
||||
This is represented by a `E2eeSyncResult` struct, which is built from small
|
||||
pieces using a `SyncResultBuilder`. The `sync_result_builder` is passed as a
|
||||
mutable ("inout") parameter to various helper functions. These retrieve and
|
||||
process the data which forms the sync body, often writing to the
|
||||
`sync_result_builder` to store their output.
|
||||
|
||||
At the end, we transfer data from the `sync_result_builder` to a new `E2eeSyncResult`
|
||||
instance to signify that the sync calculation is complete.
|
||||
"""
|
||||
user_id = sync_config.user.to_string()
|
||||
app_service = self.store.get_app_service_by_user_id(user_id)
|
||||
if app_service:
|
||||
# We no longer support AS users using /sync directly.
|
||||
# See https://github.com/matrix-org/matrix-doc/issues/1144
|
||||
raise NotImplementedError()
|
||||
|
||||
sync_result_builder = await self.get_sync_result_builder(
|
||||
sync_config,
|
||||
since_token,
|
||||
full_state=False,
|
||||
)
|
||||
|
||||
# 1. Calculate `to_device` events
|
||||
await self._generate_sync_entry_for_to_device(sync_result_builder)
|
||||
|
||||
# 2. Calculate `device_lists`
|
||||
# Device list updates are sent if a since token is provided.
|
||||
device_lists = DeviceListUpdates()
|
||||
include_device_list_updates = bool(since_token and since_token.device_list_key)
|
||||
if include_device_list_updates:
|
||||
# Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which
|
||||
# is used in calculate_user_changes below.
|
||||
#
|
||||
# TODO: Running `_generate_sync_entry_for_rooms()` is a lot of work just to
|
||||
# figure out the membership changes/derived info needed for
|
||||
# `_generate_sync_entry_for_device_list()`. In the future, we should try to
|
||||
# refactor this away.
|
||||
(
|
||||
newly_joined_rooms,
|
||||
newly_left_rooms,
|
||||
) = await self._generate_sync_entry_for_rooms(sync_result_builder)
|
||||
|
||||
# This uses the sync_result_builder.joined which is set in
|
||||
# `_generate_sync_entry_for_rooms`, if that didn't find any joined
|
||||
# rooms for some reason it is a no-op.
|
||||
(
|
||||
newly_joined_or_invited_or_knocked_users,
|
||||
newly_left_users,
|
||||
) = sync_result_builder.calculate_user_changes()
|
||||
|
||||
# include_device_list_updates can only be True if we have a
|
||||
# since token.
|
||||
assert since_token is not None
|
||||
device_lists = await self._device_handler.generate_sync_entry_for_device_list(
|
||||
user_id=user_id,
|
||||
since_token=since_token,
|
||||
now_token=sync_result_builder.now_token,
|
||||
joined_room_ids=sync_result_builder.joined_room_ids,
|
||||
newly_joined_rooms=newly_joined_rooms,
|
||||
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
|
||||
newly_left_rooms=newly_left_rooms,
|
||||
newly_left_users=newly_left_users,
|
||||
)
|
||||
|
||||
# 3. Calculate `device_one_time_keys_count` and `device_unused_fallback_key_types`
|
||||
device_id = sync_config.device_id
|
||||
one_time_keys_count: JsonMapping = {}
|
||||
unused_fallback_key_types: List[str] = []
|
||||
if device_id:
|
||||
# TODO: We should have a way to let clients differentiate between the states of:
|
||||
# * no change in OTK count since the provided since token
|
||||
# * the server has zero OTKs left for this device
|
||||
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
|
||||
one_time_keys_count = await self.store.count_e2e_one_time_keys(
|
||||
user_id, device_id
|
||||
)
|
||||
unused_fallback_key_types = list(
|
||||
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
|
||||
)
|
||||
|
||||
return E2eeSyncResult(
|
||||
to_device=sync_result_builder.to_device,
|
||||
device_lists=device_lists,
|
||||
device_one_time_keys_count=one_time_keys_count,
|
||||
device_unused_fallback_key_types=unused_fallback_key_types,
|
||||
next_batch=sync_result_builder.now_token,
|
||||
)
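For orientation, the fields of `E2eeSyncResult` roughly correspond to a response body of the following shape (illustrative values only; the exact serialisation lives in the REST layer):

```python
e2ee_sync_body = {
    "next_batch": "s72595_4483_1934",
    "to_device": {"events": []},
    "device_lists": {"changed": ["@alice:example.org"], "left": []},
    "device_one_time_keys_count": {"signed_curve25519": 50},
    "device_unused_fallback_key_types": ["signed_curve25519"],
}
```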
|
||||
|
||||
async def get_sync_result_builder(
|
||||
self,
|
||||
sync_config: SyncConfig,
|
||||
|
||||
@@ -9,7 +9,7 @@ from synapse.storage.databases.main.thread_subscriptions import (
|
||||
AutomaticSubscriptionConflicted,
|
||||
ThreadSubscription,
|
||||
)
|
||||
from synapse.types import EventOrderings, StreamKeyType, UserID
|
||||
from synapse.types import EventOrderings, UserID
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -22,7 +22,6 @@ class ThreadSubscriptionsHandler:
|
||||
self.store = hs.get_datastores().main
|
||||
self.event_handler = hs.get_event_handler()
|
||||
self.auth = hs.get_auth()
|
||||
self._notifier = hs.get_notifier()
|
||||
|
||||
async def get_thread_subscription_settings(
|
||||
self,
|
||||
@@ -133,15 +132,6 @@ class ThreadSubscriptionsHandler:
|
||||
errcode=Codes.MSC4306_CONFLICTING_UNSUBSCRIPTION,
|
||||
)
|
||||
|
||||
if outcome is not None:
|
||||
# wake up user streams (e.g. sliding sync) on the same worker
|
||||
self._notifier.on_new_event(
|
||||
StreamKeyType.THREAD_SUBSCRIPTIONS,
|
||||
# outcome is a stream_id
|
||||
outcome,
|
||||
users=[user_id.to_string()],
|
||||
)
|
||||
|
||||
return outcome
|
||||
|
||||
async def unsubscribe_user_from_thread(
|
||||
@@ -172,19 +162,8 @@ class ThreadSubscriptionsHandler:
|
||||
logger.info("rejecting thread subscriptions change (thread not accessible)")
|
||||
raise NotFoundError("No such thread root")
|
||||
|
||||
outcome = await self.store.unsubscribe_user_from_thread(
|
||||
return await self.store.unsubscribe_user_from_thread(
|
||||
user_id.to_string(),
|
||||
event.room_id,
|
||||
thread_root_event_id,
|
||||
)
|
||||
|
||||
if outcome is not None:
|
||||
# wake up user streams (e.g. sliding sync) on the same worker
|
||||
self._notifier.on_new_event(
|
||||
StreamKeyType.THREAD_SUBSCRIPTIONS,
|
||||
# outcome is a stream_id
|
||||
outcome,
|
||||
users=[user_id.to_string()],
|
||||
)
|
||||
|
||||
return outcome
|
||||
|
||||
@@ -164,13 +164,11 @@ def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]:
return counts


in_flight_requests = LaterGauge(
LaterGauge(
name="synapse_http_server_in_flight_requests_count",
desc="",
labelnames=["method", "servlet", SERVER_NAME_LABEL],
)
in_flight_requests.register_hook(
homeserver_instance_id=None, hook=_get_in_flight_counts
caller=_get_in_flight_counts,
)
@@ -702,10 +702,6 @@ class _ByteProducer:
|
||||
self._request: Optional[Request] = request
|
||||
self._iterator = iterator
|
||||
self._paused = False
|
||||
self.tracing_scope = start_active_span(
|
||||
"write_bytes_to_request",
|
||||
)
|
||||
self.tracing_scope.__enter__()
|
||||
|
||||
try:
|
||||
self._request.registerProducer(self, True)
|
||||
@@ -716,8 +712,8 @@ class _ByteProducer:
|
||||
logger.info("Connection disconnected before response was written: %r", e)
|
||||
|
||||
# We drop our references to data we'll not use.
|
||||
self._request = None
|
||||
self._iterator = iter(())
|
||||
self.tracing_scope.__exit__(type(e), None, e.__traceback__)
|
||||
else:
|
||||
# Start producing if `registerProducer` was successful
|
||||
self.resumeProducing()
|
||||
@@ -731,9 +727,6 @@ class _ByteProducer:
|
||||
self._request.write(b"".join(data))
|
||||
|
||||
def pauseProducing(self) -> None:
|
||||
opentracing_span = active_span()
|
||||
if opentracing_span is not None:
|
||||
opentracing_span.log_kv({"event": "producer_paused"})
|
||||
self._paused = True
|
||||
|
||||
def resumeProducing(self) -> None:
|
||||
@@ -744,10 +737,6 @@ class _ByteProducer:
|
||||
|
||||
self._paused = False
|
||||
|
||||
opentracing_span = active_span()
|
||||
if opentracing_span is not None:
|
||||
opentracing_span.log_kv({"event": "producer_resumed"})
|
||||
|
||||
# Write until there's backpressure telling us to stop.
|
||||
while not self._paused:
|
||||
# Get the next chunk and write it to the request.
|
||||
@@ -782,7 +771,6 @@ class _ByteProducer:
|
||||
def stopProducing(self) -> None:
|
||||
# Clear a circular reference.
|
||||
self._request = None
|
||||
self.tracing_scope.__exit__(None, None, None)
|
||||
|
||||
|
||||
def _encode_json_bytes(json_object: object) -> bytes:
@@ -925,9 +913,8 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
# once (via `Request.write`) is that doing so starts the timeout for the
# next request to be received: so if it takes longer than 60s to stream back
# the response to the client, the client never gets it.
# c.f https://github.com/twisted/twisted/issues/12498
#
# One workaround is to use a `Producer`; then the timeout is only
# The correct solution is to use a Producer; then the timeout is only
# started once all of the content is sent over the TCP connection.

# To make sure we don't write all of the bytes at once we split it up into
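A minimal sketch of the chunking mentioned in the last comment above (the chunk size here is invented; the real value is defined elsewhere in this module):

```python
from typing import List

CHUNK_SIZE = 64 * 1024


def _chunk(bytes_to_write: bytes) -> List[bytes]:
    # Split the payload so it can be handed to the producer piece by piece
    # instead of being written to the request in one go.
    return [
        bytes_to_write[i : i + CHUNK_SIZE]
        for i in range(0, len(bytes_to_write), CHUNK_SIZE)
    ]
```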
@@ -130,16 +130,6 @@ def parse_integer(
|
||||
return parse_integer_from_args(args, name, default, required, negative)
|
||||
|
||||
|
||||
@overload
|
||||
def parse_integer_from_args(
|
||||
args: Mapping[bytes, Sequence[bytes]],
|
||||
name: str,
|
||||
default: int,
|
||||
required: Literal[False] = False,
|
||||
negative: bool = False,
|
||||
) -> int: ...
|
||||
|
||||
|
||||
@overload
|
||||
def parse_integer_from_args(
|
||||
args: Mapping[bytes, Sequence[bytes]],
|
||||
|
||||
@@ -34,6 +34,7 @@ import logging
|
||||
import threading
|
||||
import typing
|
||||
import warnings
|
||||
from contextvars import ContextVar
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -56,6 +57,7 @@ from twisted.internet import defer, threads
|
||||
from twisted.python.threadpool import ThreadPool
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.logging.scopecontextmanager import _LogContextScope
|
||||
from synapse.types import ISynapseReactor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -227,24 +229,16 @@ LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]
|
||||
|
||||
|
||||
class _Sentinel:
|
||||
"""
|
||||
Sentinel to represent the root context
|
||||
"""Sentinel to represent the root context"""
|
||||
|
||||
This should only be used for tasks outside of Synapse like when we yield control
|
||||
back to the Twisted reactor (event loop) so we don't leak the current logging
|
||||
context to other tasks that are scheduled next in the event loop.
|
||||
|
||||
Nothing from the Synapse homeserver should be logged with the sentinel context. i.e.
|
||||
we should always know which server the logs are coming from.
|
||||
"""
|
||||
|
||||
__slots__ = ["previous_context", "finished", "request", "tag"]
|
||||
__slots__ = ["previous_context", "finished", "request", "scope", "tag"]
|
||||
|
||||
def __init__(self) -> None:
|
||||
# Minimal set for compatibility with LoggingContext
|
||||
self.previous_context = None
|
||||
self.finished = False
|
||||
self.request = None
|
||||
self.scope = None
|
||||
self.tag = None
|
||||
|
||||
def __str__(self) -> str:
|
||||
@@ -297,6 +291,7 @@ class LoggingContext:
|
||||
"finished",
|
||||
"request",
|
||||
"tag",
|
||||
"scope",
|
||||
]
|
||||
|
||||
def __init__(
|
||||
@@ -317,6 +312,7 @@ class LoggingContext:
|
||||
self.main_thread = get_thread_id()
|
||||
self.request = None
|
||||
self.tag = ""
|
||||
self.scope: Optional["_LogContextScope"] = None
|
||||
|
||||
# keep track of whether we have hit the __exit__ block for this context
|
||||
# (suggesting that the thing that created the context thinks it should
|
||||
@@ -329,6 +325,9 @@ class LoggingContext:
|
||||
# we track the current request_id
|
||||
self.request = self.parent_context.request
|
||||
|
||||
# we also track the current scope:
|
||||
self.scope = self.parent_context.scope
|
||||
|
||||
if request is not None:
|
||||
# the request param overrides the request from the parent context
|
||||
self.request = request
|
||||
@@ -625,17 +624,9 @@ class LoggingContextFilter(logging.Filter):
|
||||
|
||||
|
||||
class PreserveLoggingContext:
|
||||
"""
|
||||
Context manager which replaces the logging context
|
||||
"""Context manager which replaces the logging context
|
||||
|
||||
The previous logging context is restored on exit.
|
||||
|
||||
`make_deferred_yieldable` is pretty equivalent to using `with
|
||||
PreserveLoggingContext():` (using the default sentinel context), i.e. it clears the
|
||||
logcontext before awaiting (and so before execution passes back to the reactor) and
|
||||
restores the old context once the awaitable completes (execution passes from the
|
||||
reactor back to the code).
|
||||
"""
|
||||
The previous logging context is restored on exit."""
|
||||
|
||||
__slots__ = ["_old_context", "_new_context"]
|
||||
|
||||
@@ -670,13 +661,12 @@ class PreserveLoggingContext:
|
||||
)
|
||||
|
||||
|
||||
_thread_local = threading.local()
|
||||
_thread_local.current_context = SENTINEL_CONTEXT
|
||||
_current_context: ContextVar[LoggingContextOrSentinel] = ContextVar("current_context")
|
||||
|
||||
|
||||
def current_context() -> LoggingContextOrSentinel:
|
||||
"""Get the current logging context from thread local storage"""
|
||||
return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)
|
||||
return _current_context.get(SENTINEL_CONTEXT)
|
||||
|
||||
|
||||
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
|
||||
@@ -697,7 +687,7 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe
|
||||
if current is not context:
|
||||
rusage = get_thread_resource_usage()
|
||||
current.stop(rusage)
|
||||
_thread_local.current_context = context
|
||||
_current_context.set(context)
|
||||
context.start(rusage)
|
||||
|
||||
return current
|
||||
@@ -801,15 +791,6 @@ def run_in_background(
|
||||
return from the function, and that the sentinel context is set once the
|
||||
deferred returned by the function completes.
|
||||
|
||||
To explain how the log contexts work here:
|
||||
- When `run_in_background` is called, the current context is stored ("original"),
|
||||
we kick off the background task in the current context, and we restore that
|
||||
original context before returning
|
||||
- When the background task finishes, we don't want to leak our context into the
|
||||
reactor which would erroneously get attached to the next operation picked up by
|
||||
the event loop. We add a callback to the deferred which will clear the logging
|
||||
context after it finishes and yields control back to the reactor.
|
||||
|
||||
Useful for wrapping functions that return a deferred or coroutine, which you don't
|
||||
yield or await on (for instance because you want to pass it to
|
||||
deferred.gatherResults()).
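A hedged example of the fire-and-gather pattern this paragraph describes (`fetch_thing` is a placeholder for any async function):

```python
d1 = run_in_background(fetch_thing, "first")
d2 = run_in_background(fetch_thing, "second")

# gatherResults returns a Deferred that does not follow the logcontext
# rules, so wrap it before awaiting.
first, second = await make_deferred_yieldable(
    defer.gatherResults([d1, d2], consumeErrors=True)
)
```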
|
||||
@@ -821,15 +802,8 @@ def run_in_background(
|
||||
`f` doesn't raise any deferred exceptions, otherwise a scary-looking
|
||||
CRITICAL error about an unhandled error will be logged without much
|
||||
indication about where it came from.
|
||||
|
||||
Returns:
|
||||
Deferred which returns the result of func, or `None` if func raises.
|
||||
Note that the returned Deferred does not follow the synapse logcontext
|
||||
rules.
|
||||
"""
|
||||
calling_context = current_context()
|
||||
try:
|
||||
# (kick off the task in the current context)
|
||||
res = f(*args, **kwargs)
|
||||
except Exception:
|
||||
# the assumption here is that the caller doesn't want to be disturbed
|
||||
@@ -838,9 +812,6 @@ def run_in_background(
|
||||
|
||||
# `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
|
||||
# value. Convert it to a `Deferred`.
|
||||
#
|
||||
# Wrapping the value in a deferred has the side effect of executing the coroutine,
|
||||
# if it is one. If it's already a deferred, then we can just use that.
|
||||
d: "defer.Deferred[R]"
|
||||
if isinstance(res, typing.Coroutine):
|
||||
# Wrap the coroutine in a `Deferred`.
|
||||
@@ -855,32 +826,11 @@ def run_in_background(
|
||||
# `res` is a plain value. Wrap it in a `Deferred`.
|
||||
d = defer.succeed(res)
|
||||
|
||||
# The deferred has already completed
|
||||
if d.called and not d.paused:
|
||||
# The function should have maintained the logcontext, so we can
|
||||
# optimise out the messing about
|
||||
return d
|
||||
|
||||
# The function may have reset the context before returning, so we need to restore it
|
||||
# now.
|
||||
#
|
||||
# Our goal is to have the caller logcontext unchanged after firing off the
|
||||
# background task and returning.
|
||||
set_current_context(calling_context)
|
||||
|
||||
# The original logcontext will be restored when the deferred completes, but
|
||||
# there is nothing waiting for it, so it will get leaked into the reactor (which
|
||||
# would then get picked up by the next thing the reactor does). We therefore
|
||||
# need to reset the logcontext here (set the `sentinel` logcontext) before
|
||||
# yielding control back to the reactor.
|
||||
#
|
||||
# (If this feels asymmetric, consider it this way: we are
|
||||
# effectively forking a new thread of execution. We are
|
||||
# probably currently within a ``with LoggingContext()`` block,
|
||||
# which is supposed to have a single entry and exit point. But
|
||||
# by spawning off another deferred, we are effectively
|
||||
# adding a new exit point.)
|
||||
d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
|
||||
return d
|
||||
|
||||
|
||||
@@ -898,100 +848,21 @@ def run_coroutine_in_background(
|
||||
coroutine directly rather than a function. We can do this because coroutines
|
||||
do not run until called, and so calling an async function without awaiting
|
||||
cannot change the log contexts.
|
||||
|
||||
This is an ergonomic helper so we can do this:
|
||||
```python
|
||||
run_coroutine_in_background(func1(arg1))
|
||||
```
|
||||
Rather than having to do this:
|
||||
```python
|
||||
run_in_background(lambda: func1(arg1))
|
||||
```
|
||||
"""
|
||||
calling_context = current_context()
|
||||
|
||||
# Wrap the coroutine in a deferred, which will have the side effect of executing the
|
||||
# coroutine in the background.
|
||||
d = defer.ensureDeferred(coroutine)
|
||||
|
||||
# The function may have reset the context before returning, so we need to restore it
|
||||
# now.
|
||||
#
|
||||
# Our goal is to have the caller logcontext unchanged after firing off the
|
||||
# background task and returning.
|
||||
set_current_context(calling_context)
|
||||
|
||||
# The original logcontext will be restored when the deferred completes, but
|
||||
# there is nothing waiting for it, so it will get leaked into the reactor (which
|
||||
# would then get picked up by the next thing the reactor does). We therefore
|
||||
# need to reset the logcontext here (set the `sentinel` logcontext) before
|
||||
# yielding control back to the reactor.
|
||||
#
|
||||
# (If this feels asymmetric, consider it this way: we are
|
||||
# effectively forking a new thread of execution. We are
|
||||
# probably currently within a ``with LoggingContext()`` block,
|
||||
# which is supposed to have a single entry and exit point. But
|
||||
# by spawning off another deferred, we are effectively
|
||||
# adding a new exit point.)
|
||||
d.addBoth(_set_context_cb, SENTINEL_CONTEXT)
|
||||
return d
|
||||
return defer.ensureDeferred(coroutine)
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
|
||||
"""
|
||||
Given a deferred, make it follow the Synapse logcontext rules:
|
||||
|
||||
- If the deferred has completed, essentially does nothing (just returns another
|
||||
completed deferred with the result/failure).
|
||||
- If the deferred has not yet completed, resets the logcontext before returning a
|
||||
incomplete deferred. Then, when the deferred completes, restores the current
|
||||
logcontext before running callbacks/errbacks.
|
||||
|
||||
This means the resultant deferred can be awaited without leaking the current
|
||||
logcontext to the reactor (which would then get erroneously picked up by the next
|
||||
thing the reactor does), and also means that the logcontext is preserved when the
|
||||
deferred completes.
|
||||
|
||||
(This is more-or-less the opposite operation to run_in_background in terms of how it
|
||||
handles log contexts.)
|
||||
|
||||
Pretty much equivalent to using `with PreserveLoggingContext():`, i.e. it clears the
|
||||
logcontext before awaiting (and so before execution passes back to the reactor) and
|
||||
restores the old context once the awaitable completes (execution passes from the
|
||||
reactor back to the code).
|
||||
"""
|
||||
# The deferred has already completed
|
||||
if deferred.called and not deferred.paused:
|
||||
# it looks like this deferred is ready to run any callbacks we give it
|
||||
# immediately. We may as well optimise out the logcontext faffery.
|
||||
return deferred
|
||||
|
||||
# Our goal is to have the caller logcontext unchanged after they yield/await the
|
||||
# returned deferred.
|
||||
#
|
||||
# When the caller yield/await's the returned deferred, it may yield
|
||||
# control back to the reactor. To avoid leaking the current logcontext to the
|
||||
# reactor (which would then get erroneously picked up by the next thing the reactor
|
||||
# does) while the deferred runs in the reactor event loop, we reset the logcontext
|
||||
# and add a callback to the deferred to restore it so the caller's logcontext is
|
||||
# active when the deferred completes.
|
||||
prev_context = set_current_context(SENTINEL_CONTEXT)
|
||||
deferred.addBoth(_set_context_cb, prev_context)
|
||||
return deferred
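A short usage example of the rule this docstring describes (the awaited Deferred is a placeholder for anything produced by a non-logcontext-aware API):

```python
d = some_twisted_api()  # placeholder: any Deferred that ignores logcontext rules
result = await make_deferred_yieldable(d)
```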
|
||||
|
||||
|
||||
ResultT = TypeVar("ResultT")
|
||||
|
||||
|
||||
def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT:
|
||||
"""A callback function which just sets the logging context"""
|
||||
set_current_context(context)
|
||||
return result
|
||||
|
||||
|
||||
def defer_to_thread(
|
||||
reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
|
||||
) -> "defer.Deferred[R]":
|
||||
@@ -1063,18 +934,6 @@ def defer_to_threadpool(
|
||||
A Deferred which fires a callback with the result of `f`, or an
|
||||
errback if `f` throws an exception.
|
||||
"""
|
||||
curr_context = current_context()
|
||||
if not curr_context:
|
||||
logger.warning(
|
||||
"Calling defer_to_threadpool from sentinel context: metrics will be lost"
|
||||
)
|
||||
parent_context = None
|
||||
else:
|
||||
assert isinstance(curr_context, LoggingContext)
|
||||
parent_context = curr_context
|
||||
|
||||
def g() -> R:
|
||||
with LoggingContext(str(curr_context), parent_context=parent_context):
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
|
||||
return make_deferred_yieldable(
|
||||
threads.deferToThreadPool(reactor, threadpool, f, *args, **kwargs)
|
||||
)
|
||||
|
||||
@@ -251,17 +251,18 @@ class _DummyTagNames:
|
||||
try:
|
||||
import opentracing
|
||||
import opentracing.tags
|
||||
from opentracing.scope_managers.contextvars import ContextVarsScopeManager
|
||||
|
||||
tags = opentracing.tags
|
||||
except ImportError:
|
||||
opentracing = None # type: ignore[assignment]
|
||||
tags = _DummyTagNames # type: ignore[assignment]
|
||||
ContextVarsScopeManager = None # type: ignore
|
||||
try:
|
||||
from jaeger_client import Config as JaegerConfig
|
||||
|
||||
from synapse.logging.scopecontextmanager import LogContextScopeManager
|
||||
except ImportError:
|
||||
JaegerConfig = None # type: ignore
|
||||
LogContextScopeManager = None # type: ignore
|
||||
|
||||
|
||||
try:
|
||||
@@ -483,7 +484,7 @@ def init_tracer(hs: "HomeServer") -> None:
|
||||
config = JaegerConfig(
|
||||
config=jaeger_config,
|
||||
service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
|
||||
scope_manager=ContextVarsScopeManager(),
|
||||
scope_manager=LogContextScopeManager(),
|
||||
metrics_factory=PrometheusMetricsFactory(),
|
||||
)
|
||||
|
||||
|
||||
161 synapse/logging/scopecontextmanager.py Normal file
@@ -0,0 +1,161 @@
|
||||
#
|
||||
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
||||
#
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
# Copyright (C) 2023 New Vector, Ltd
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# See the GNU Affero General Public License for more details:
|
||||
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
||||
#
|
||||
# Originally licensed under the Apache License, Version 2.0:
|
||||
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
||||
#
|
||||
# [This file includes modifications made by New Vector Limited]
|
||||
#
|
||||
#
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from opentracing import Scope, ScopeManager, Span
|
||||
|
||||
from synapse.logging.context import (
|
||||
LoggingContext,
|
||||
current_context,
|
||||
nested_logging_context,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LogContextScopeManager(ScopeManager):
"""
The LogContextScopeManager tracks the active scope in opentracing
by using the log contexts which are native to synapse. This is so
that the basic opentracing api can be used across twisted Deferreds.

It would be nice just to use opentracing's ContextVarsScopeManager,
but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
"""
def __init__(self) -> None:
|
||||
pass
|
||||
|
||||
@property
|
||||
def active(self) -> Optional[Scope]:
|
||||
"""
|
||||
Returns the currently active Scope which can be used to access the
|
||||
currently active Scope.span.
|
||||
If there is a non-null Scope, its wrapped Span
|
||||
becomes an implicit parent of any newly-created Span at
|
||||
Tracer.start_active_span() time.
|
||||
|
||||
Return:
|
||||
The Scope that is active, or None if not available.
|
||||
"""
|
||||
ctx = current_context()
|
||||
return ctx.scope
|
||||
|
||||
def activate(self, span: Span, finish_on_close: bool) -> Scope:
|
||||
"""
|
||||
Makes a Span active.
|
||||
Args
|
||||
span: the span that should become active.
|
||||
finish_on_close: whether Span should be automatically finished when
|
||||
Scope.close() is called.
|
||||
|
||||
Returns:
|
||||
Scope to control the end of the active period for
|
||||
*span*. It is a programming error to neglect to call
|
||||
Scope.close() on the returned instance.
|
||||
"""
|
||||
|
||||
ctx = current_context()
|
||||
|
||||
if not ctx:
|
||||
logger.error("Tried to activate scope outside of loggingcontext")
|
||||
return Scope(None, span) # type: ignore[arg-type]
|
||||
|
||||
if ctx.scope is not None:
|
||||
# start a new logging context as a child of the existing one.
|
||||
# Doing so -- rather than updating the existing logcontext -- means that
|
||||
# creating several concurrent spans under the same logcontext works
|
||||
# correctly.
|
||||
ctx = nested_logging_context("")
|
||||
enter_logcontext = True
|
||||
else:
|
||||
# if there is no span currently associated with the current logcontext, we
|
||||
# just store the scope in it.
|
||||
#
|
||||
# This feels a bit dubious, but it does hack around a problem where a
|
||||
# span outlasts its parent logcontext (which would otherwise lead to
|
||||
# "Re-starting finished log context" errors).
|
||||
enter_logcontext = False
|
||||
|
||||
scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
|
||||
ctx.scope = scope
|
||||
if enter_logcontext:
|
||||
ctx.__enter__()
|
||||
|
||||
return scope
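A rough illustration of the contract `activate` implements (this uses the generic opentracing API, not code from this file):

```python
scope = tracer.scope_manager.activate(span, finish_on_close=True)
try:
    ...  # work performed while the span is active on the current logcontext
finally:
    # Detaches the scope from the logcontext and, because finish_on_close
    # was True, finishes the span as well.
    scope.close()
```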
|
||||
|
||||
|
||||
class _LogContextScope(Scope):
|
||||
"""
|
||||
A custom opentracing scope, associated with a LogContext
|
||||
|
||||
* When the scope is closed, the logcontext's active scope is reset to None.
|
||||
and - if enter_logcontext was set - the logcontext is finished too.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
manager: LogContextScopeManager,
|
||||
span: Span,
|
||||
logcontext: LoggingContext,
|
||||
enter_logcontext: bool,
|
||||
finish_on_close: bool,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
manager:
|
||||
the manager that is responsible for this scope.
|
||||
span:
|
||||
the opentracing span which this scope represents the local
|
||||
lifetime for.
|
||||
logcontext:
|
||||
the log context to which this scope is attached.
|
||||
enter_logcontext:
|
||||
if True the log context will be exited when the scope is finished
|
||||
finish_on_close:
|
||||
if True finish the span when the scope is closed
|
||||
"""
|
||||
super().__init__(manager, span)
|
||||
self.logcontext = logcontext
|
||||
self._finish_on_close = finish_on_close
|
||||
self._enter_logcontext = enter_logcontext
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"Scope<{self.span}>"
|
||||
|
||||
def close(self) -> None:
|
||||
active_scope = self.manager.active
|
||||
if active_scope is not self:
|
||||
logger.error(
|
||||
"Closing scope %s which is not the currently-active one %s",
|
||||
self,
|
||||
active_scope,
|
||||
)
|
||||
|
||||
if self._finish_on_close:
|
||||
self.span.finish()
|
||||
|
||||
self.logcontext.scope = None
|
||||
|
||||
if self._enter_logcontext:
|
||||
self.logcontext.__exit__(None, None, None)
|
||||
@@ -179,13 +179,11 @@ class MediaRepository:
|
||||
|
||||
# We get the media upload limits and sort them in descending order of
|
||||
# time period, so that we can apply some optimizations.
|
||||
self.default_media_upload_limits = hs.config.media.media_upload_limits
|
||||
self.default_media_upload_limits.sort(
|
||||
self.media_upload_limits = hs.config.media.media_upload_limits
|
||||
self.media_upload_limits.sort(
|
||||
key=lambda limit: limit.time_period_ms, reverse=True
|
||||
)
|
||||
|
||||
self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository
|
||||
|
||||
def _start_update_recently_accessed(self) -> Deferred:
|
||||
return run_as_background_process(
|
||||
"update_recently_accessed_media",
|
||||
@@ -342,27 +340,16 @@ class MediaRepository:

# Check that the user has not exceeded any of the media upload limits.

# Use limits from module API if provided
media_upload_limits = (
await self.media_repository_callbacks.get_media_upload_limits_for_user(
auth_user.to_string()
)
)

# Otherwise use the default limits from config
if media_upload_limits is None:
# Note: the media upload limits are sorted so larger time periods are
# first.
media_upload_limits = self.default_media_upload_limits

# This is the total size of media uploaded by the user in the last
# `time_period_ms` milliseconds, or None if we haven't checked yet.
uploaded_media_size: Optional[int] = None

for limit in media_upload_limits:
# Note: the media upload limits are sorted so larger time periods are
# first.
for limit in self.media_upload_limits:
# We only need to check the amount of media uploaded by the user in
# this latest (smaller) time period if the amount of media uploaded
# in a previous (larger) time period is below the limit.
# in a previous (larger) time period is above the limit.
#
# This optimization means that in the common case where the user
# hasn't uploaded much media, we only need to query the database
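A worked example of the optimization described in the comments above, with invented numbers: suppose the configured limits are 10 GiB per 30 days and 1 GiB per day, sorted so the 30-day limit is checked first. If the database says 0.5 GiB was uploaded in the last 30 days, then uploads in the last day, being a subset of that window, total at most 0.5 GiB as well, which cannot exceed the 1 GiB daily limit, so no second query is needed for the smaller window.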
@@ -376,12 +363,6 @@ class MediaRepository:
|
||||
)
|
||||
|
||||
if uploaded_media_size + content_length > limit.max_bytes:
|
||||
await self.media_repository_callbacks.on_media_upload_limit_exceeded(
|
||||
user_id=auth_user.to_string(),
|
||||
limit=limit,
|
||||
sent_bytes=uploaded_media_size,
|
||||
attempted_bytes=content_length,
|
||||
)
|
||||
raise SynapseError(
|
||||
400, "Media upload limit exceeded", Codes.RESOURCE_LIMIT_EXCEEDED
|
||||
)
|
||||
|
||||
@@ -43,7 +43,7 @@ from typing import (
|
||||
)
|
||||
|
||||
import attr
|
||||
from packaging.version import parse as parse_version
|
||||
from pkg_resources import parse_version
|
||||
from prometheus_client import (
|
||||
CollectorRegistry,
|
||||
Counter,
|
||||
@@ -73,6 +73,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
METRICS_PREFIX = "/_synapse/metrics"
|
||||
|
||||
all_gauges: Dict[str, Collector] = {}
|
||||
|
||||
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
|
||||
|
||||
SERVER_NAME_LABEL = "server_name"
|
||||
@@ -161,110 +163,42 @@ class LaterGauge(Collector):
|
||||
name: str
|
||||
desc: str
|
||||
labelnames: Optional[StrSequence] = attr.ib(hash=False)
|
||||
_instance_id_to_hook_map: Dict[
|
||||
Optional[str], # instance_id
|
||||
Callable[
|
||||
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
|
||||
],
|
||||
] = attr.ib(factory=dict, hash=False)
|
||||
"""
|
||||
Map from homeserver instance_id to a callback. Each callback should either return a
|
||||
value (if there are no labels for this metric), or dict mapping from a label tuple
|
||||
to a value.
|
||||
|
||||
We use `instance_id` instead of `server_name` because it's possible to have multiple
|
||||
workers running in the same process with the same `server_name`.
|
||||
"""
|
||||
# callback: should either return a value (if there are no labels for this metric),
|
||||
# or dict mapping from a label tuple to a value
|
||||
caller: Callable[
|
||||
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
|
||||
]
|
||||
|
||||
def collect(self) -> Iterable[Metric]:
|
||||
# The decision to add `SERVER_NAME_LABEL` is from the `LaterGauge` usage itself
|
||||
# (we don't enforce it here, one level up).
|
||||
g = GaugeMetricFamily(self.name, self.desc, labels=self.labelnames) # type: ignore[missing-server-name-label]
|
||||
|
||||
for homeserver_instance_id, hook in self._instance_id_to_hook_map.items():
|
||||
try:
|
||||
hook_result = hook()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Exception running callback for LaterGauge(%s) for homeserver_instance_id=%s",
|
||||
self.name,
|
||||
homeserver_instance_id,
|
||||
)
|
||||
# Continue to return the rest of the metrics that aren't broken
|
||||
continue
|
||||
try:
|
||||
calls = self.caller()
|
||||
except Exception:
|
||||
logger.exception("Exception running callback for LaterGauge(%s)", self.name)
|
||||
yield g
|
||||
return
|
||||
|
||||
if isinstance(hook_result, (int, float)):
|
||||
g.add_metric([], hook_result)
|
||||
else:
|
||||
for k, v in hook_result.items():
|
||||
g.add_metric(k, v)
|
||||
if isinstance(calls, (int, float)):
|
||||
g.add_metric([], calls)
|
||||
else:
|
||||
for k, v in calls.items():
|
||||
g.add_metric(k, v)
|
||||
|
||||
yield g
|
||||
|
||||
def register_hook(
self,
*,
homeserver_instance_id: Optional[str],
hook: Callable[
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
],
) -> None:
"""
Register a callback/hook that will be called to generate metric samples for
the gauge.

Args:
homeserver_instance_id: The unique ID for this Synapse process instance
(`hs.get_instance_id()`) that this hook is associated with. This can be used
later to lookup all hooks associated with a given server name in order to
unregister them. This should only be omitted for global hooks that work
across all homeservers.
hook: A callback that should either return a value (if there are no
labels for this metric), or dict mapping from a label tuple to a value
"""
# We shouldn't have multiple hooks registered for the same homeserver `instance_id`.
existing_hook = self._instance_id_to_hook_map.get(homeserver_instance_id)
assert existing_hook is None, (
f"LaterGauge(name={self.name}) hook already registered for homeserver_instance_id={homeserver_instance_id}. "
"This is likely a Synapse bug and you forgot to unregister the previous hooks for "
"the server (especially in tests)."
)

self._instance_id_to_hook_map[homeserver_instance_id] = hook
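A hedged sketch of the intended usage (the metric name and tracked collection are made up; the pattern mirrors the presence gauges changed earlier in this diff):

```python
example_cache_size_gauge = LaterGauge(
    name="synapse_example_cache_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

# At homeserver startup: register a hook keyed on this instance.
example_cache_size_gauge.register_hook(
    homeserver_instance_id=hs.get_instance_id(),
    hook=lambda: {(hs.hostname,): len(example_cache)},
)

# At homeserver shutdown: drop the hook again so it doesn't linger.
example_cache_size_gauge.unregister_hooks_for_homeserver_instance_id(
    hs.get_instance_id()
)
```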
def unregister_hooks_for_homeserver_instance_id(
|
||||
self, homeserver_instance_id: str
|
||||
) -> None:
|
||||
"""
|
||||
Unregister all hooks associated with the given homeserver `instance_id`. This should be
|
||||
called when a homeserver is shutdown to avoid extra hooks sitting around.
|
||||
|
||||
Args:
|
||||
homeserver_instance_id: The unique ID for this Synapse process instance to
|
||||
unregister hooks for (`hs.get_instance_id()`).
|
||||
"""
|
||||
self._instance_id_to_hook_map.pop(homeserver_instance_id, None)
|
||||
|
||||
def __attrs_post_init__(self) -> None:
|
||||
self._register()
|
||||
|
||||
def _register(self) -> None:
|
||||
if self.name in all_gauges.keys():
|
||||
logger.warning("%s already registered, reregistering", self.name)
|
||||
REGISTRY.unregister(all_gauges.pop(self.name))
|
||||
|
||||
REGISTRY.register(self)
|
||||
|
||||
# We shouldn't have multiple metrics with the same name. Typically, metrics
|
||||
# should be created globally so you shouldn't be running into this and this will
|
||||
# catch any stupid mistakes. The `REGISTRY.register(self)` call above will also
|
||||
# raise an error if the metric already exists but to make things explicit, we'll
|
||||
# also check here.
|
||||
existing_gauge = all_later_gauges_to_clean_up_on_shutdown.get(self.name)
|
||||
assert existing_gauge is None, f"LaterGauge(name={self.name}) already exists. "
|
||||
|
||||
# Keep track of the gauge so we can clean it up later.
|
||||
all_later_gauges_to_clean_up_on_shutdown[self.name] = self
|
||||
|
||||
|
||||
all_later_gauges_to_clean_up_on_shutdown: Dict[str, LaterGauge] = {}
|
||||
"""
|
||||
Track all `LaterGauge` instances so we can remove any associated hooks during homeserver
|
||||
shutdown.
|
||||
"""
|
||||
all_gauges[self.name] = self
|
||||
|
||||
|
||||
# `MetricsEntry` only makes sense when it is a `Protocol`,
|
||||
@@ -316,7 +250,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
        # Protects access to _registrations
        self._lock = threading.Lock()

        REGISTRY.register(self)
        self._register_with_collector()

    def register(
        self,
@@ -407,6 +341,14 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
                gauge.add_metric(labels=key, value=getattr(metrics, name))
            yield gauge

    def _register_with_collector(self) -> None:
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self


class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily):
    """
@@ -228,11 +228,6 @@ def run_as_background_process(
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).

    Because the returned Deferred does not follow the synapse logcontext rules, awaiting
    the result of this function will result in the log context being cleared (bad). In
    order to properly await the result of this function and maintain the current log
    context, use `make_deferred_yieldable`.

    Args:
        desc: a description for this background process type
        server_name: The homeserver name that this background process is being run for
@@ -285,20 +280,6 @@ def run_as_background_process(
            name=desc, **{SERVER_NAME_LABEL: server_name}
        ).dec()

    # To explain how the log contexts work here:
    #  - When `run_as_background_process` is called, the current context is stored
    #    (using `PreserveLoggingContext`), we kick off the background task, and we
    #    restore the original context before returning (also part of
    #    `PreserveLoggingContext`).
    #  - The background task runs in its own new logcontext named after `desc`.
    #  - When the background task finishes, we don't want to leak our background context
    #    into the reactor, which would erroneously get attached to the next operation
    #    picked up by the event loop. We use `PreserveLoggingContext` to set the
    #    `sentinel` context, which means the new `BackgroundProcessLoggingContext` will
    #    remember the `sentinel` context as its previous context to return to when it
    #    exits and yields control back to the reactor.
    #
    # TODO: Why can't we simplify to using `return run_in_background(run)`?
    with PreserveLoggingContext():
        # Note that we return a Deferred here so that it can be used in a
        # looping_call and other places that expect a Deferred.

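The hedged sketch below illustrates the pattern the docstring describes: kicking off a background process and, where the result is actually needed, awaiting it via `make_deferred_yieldable` so the caller's logcontext survives. The `do_cleanup` coroutine, the `"example_cleanup"` description, and the exact argument order are illustrative assumptions based on the docstring shown above.

```python
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process


async def do_cleanup() -> int:
    # Hypothetical background work.
    return 42


async def caller(server_name: str) -> None:
    # Fire-and-forget: the returned Deferred can simply be dropped (or handed to
    # clock.looping_call and friends, as the docstring notes).
    run_as_background_process("example_cleanup", server_name, do_cleanup)

    # If the result is needed, do NOT await the Deferred directly (that would clear
    # the current logcontext); wrap it with make_deferred_yieldable instead.
    result = await make_deferred_yieldable(
        run_as_background_process("example_cleanup", server_name, do_cleanup)
    )
```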
@@ -50,7 +50,6 @@ from synapse.api.constants import ProfileFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.config import ConfigError
from synapse.config.repository import MediaUploadLimit
from synapse.events import EventBase
from synapse.events.presence_router import (
    GET_INTERESTED_USERS_CALLBACK,
@@ -95,9 +94,7 @@ from synapse.module_api.callbacks.account_validity_callbacks import (
)
from synapse.module_api.callbacks.media_repository_callbacks import (
    GET_MEDIA_CONFIG_FOR_USER_CALLBACK,
    GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK,
    IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK,
    ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK,
)
from synapse.module_api.callbacks.ratelimit_callbacks import (
    GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK,
@@ -208,7 +205,6 @@ __all__ = [
    "RoomAlias",
    "UserProfile",
    "RatelimitOverride",
    "MediaUploadLimit",
]

logger = logging.getLogger(__name__)
@@ -466,12 +462,6 @@ class ModuleApi:
        is_user_allowed_to_upload_media_of_size: Optional[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = None,
        get_media_upload_limits_for_user: Optional[
            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
        ] = None,
        on_media_upload_limit_exceeded: Optional[
            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
        ] = None,
    ) -> None:
        """Registers callbacks for media repository capabilities.
        Added in Synapse v1.132.0.
@@ -479,8 +469,6 @@ class ModuleApi:
        return self._callbacks.media_repository.register_callbacks(
            get_media_config_for_user=get_media_config_for_user,
            is_user_allowed_to_upload_media_of_size=is_user_allowed_to_upload_media_of_size,
            get_media_upload_limits_for_user=get_media_upload_limits_for_user,
            on_media_upload_limit_exceeded=on_media_upload_limit_exceeded,
        )

    def register_third_party_rules_callbacks(

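As a rough illustration of the registration API above, here is a hypothetical module that supplies per-user upload limits. `register_media_repository_callbacks`, its keyword names, and the `MediaUploadLimit(max_bytes=..., time_period_ms=...)` constructor are taken from this diff; the module class, its `(config, api)` scaffolding, and the concrete limit values are invented.

```python
from typing import List, Optional

from synapse.config.repository import MediaUploadLimit
from synapse.module_api import ModuleApi


class ExampleMediaLimitsModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Only register the callbacks this module actually implements.
        api.register_media_repository_callbacks(
            get_media_upload_limits_for_user=self.get_media_upload_limits_for_user,
        )

    async def get_media_upload_limits_for_user(
        self, user_id: str
    ) -> Optional[List[MediaUploadLimit]]:
        # Hypothetical policy: 100 MiB per day for everyone.
        # Per the dispatcher below, returning [] means "no limit", while returning
        # None defers the decision to the next registered module.
        return [
            MediaUploadLimit(
                max_bytes=100 * 1024 * 1024, time_period_ms=24 * 60 * 60 * 1000
            )
        ]
```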
@@ -15,7 +15,6 @@
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional

from synapse.config.repository import MediaUploadLimit
from synapse.types import JsonDict
from synapse.util.async_helpers import delay_cancellation
from synapse.util.metrics import Measure
@@ -29,14 +28,6 @@ GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[Optional[JsonDict

IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitable[bool]]

GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK = Callable[
    [str], Awaitable[Optional[List[MediaUploadLimit]]]
]

ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK = Callable[
    [str, MediaUploadLimit, int, int], Awaitable[None]
]


class MediaRepositoryModuleApiCallbacks:
    def __init__(self, hs: "HomeServer") -> None:
@@ -48,12 +39,6 @@ class MediaRepositoryModuleApiCallbacks:
        self._is_user_allowed_to_upload_media_of_size_callbacks: List[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = []
        self._get_media_upload_limits_for_user_callbacks: List[
            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
        ] = []
        self._on_media_upload_limit_exceeded_callbacks: List[
            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
        ] = []

    def register_callbacks(
        self,
@@ -61,12 +46,6 @@ class MediaRepositoryModuleApiCallbacks:
        is_user_allowed_to_upload_media_of_size: Optional[
            IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK
        ] = None,
        get_media_upload_limits_for_user: Optional[
            GET_MEDIA_UPLOAD_LIMITS_FOR_USER_CALLBACK
        ] = None,
        on_media_upload_limit_exceeded: Optional[
            ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK
        ] = None,
    ) -> None:
        """Register callbacks from module for each hook."""
        if get_media_config_for_user is not None:
@@ -77,16 +56,6 @@ class MediaRepositoryModuleApiCallbacks:
                is_user_allowed_to_upload_media_of_size
            )

        if get_media_upload_limits_for_user is not None:
            self._get_media_upload_limits_for_user_callbacks.append(
                get_media_upload_limits_for_user
            )

        if on_media_upload_limit_exceeded is not None:
            self._on_media_upload_limit_exceeded_callbacks.append(
                on_media_upload_limit_exceeded
            )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        for callback in self._get_media_config_for_user_callbacks:
            with Measure(
@@ -114,47 +83,3 @@ class MediaRepositoryModuleApiCallbacks:
                    return res

        return True

    async def get_media_upload_limits_for_user(
        self, user_id: str
    ) -> Optional[List[MediaUploadLimit]]:
        """
        Get the first non-None list of MediaUploadLimits for the user from the registered callbacks.
        If a list is returned it will be sorted in descending order of duration.
        """
        for callback in self._get_media_upload_limits_for_user_callbacks:
            with Measure(
                self.clock,
                name=f"{callback.__module__}.{callback.__qualname__}",
                server_name=self.server_name,
            ):
                res: Optional[List[MediaUploadLimit]] = await delay_cancellation(
                    callback(user_id)
                )
                if res is not None:  # to allow [] to be returned meaning no limit
                    # We sort them in descending order of time period
                    res.sort(key=lambda limit: limit.time_period_ms, reverse=True)
                    return res

        return None

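To make the ordering rule above concrete, here is a small, self-contained illustration of the same sort used in `get_media_upload_limits_for_user`: limits end up ordered by `time_period_ms`, longest window first. The byte counts and periods are invented.

```python
from synapse.config.repository import MediaUploadLimit

limits = [
    MediaUploadLimit(max_bytes=50_000_000, time_period_ms=60 * 60 * 1000),            # hourly
    MediaUploadLimit(max_bytes=1_000_000_000, time_period_ms=7 * 24 * 60 * 60 * 1000),  # weekly
    MediaUploadLimit(max_bytes=200_000_000, time_period_ms=24 * 60 * 60 * 1000),        # daily
]

# Same sort as in get_media_upload_limits_for_user above: longest period first.
limits.sort(key=lambda limit: limit.time_period_ms, reverse=True)
# Resulting order: weekly, daily, hourly.
```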
    async def on_media_upload_limit_exceeded(
        self,
        user_id: str,
        limit: MediaUploadLimit,
        sent_bytes: int,
        attempted_bytes: int,
    ) -> None:
        for callback in self._on_media_upload_limit_exceeded_callbacks:
            with Measure(
                self.clock,
                name=f"{callback.__module__}.{callback.__qualname__}",
                server_name=self.server_name,
            ):
                # Use a copy of the data in case the module modifies it
                limit_copy = MediaUploadLimit(
                    max_bytes=limit.max_bytes, time_period_ms=limit.time_period_ms
                )
                await delay_cancellation(
                    callback(user_id, limit_copy, sent_bytes, attempted_bytes)
                )

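A module-side counterpart to the dispatcher above might look like the following sketch. The argument list mirrors `ON_MEDIA_UPLOAD_LIMIT_EXCEEDED_CALLBACK` as defined earlier in this diff; the interpretation of `sent_bytes` (bytes already uploaded within the window) and `attempted_bytes` (size of the rejected upload) is an assumption, as is the logging itself.

```python
import logging

from synapse.config.repository import MediaUploadLimit

logger = logging.getLogger(__name__)


async def on_media_upload_limit_exceeded(
    user_id: str,
    limit: MediaUploadLimit,
    sent_bytes: int,
    attempted_bytes: int,
) -> None:
    # The dispatcher hands the module a copy of the limit, so inspecting it here
    # cannot affect what the server enforces.
    logger.warning(
        "%s exceeded a media upload limit: %d bytes already sent within %d ms, "
        "attempted another %d bytes (limit: %d bytes)",
        user_id,
        sent_bytes,
        limit.time_period_ms,
        attempted_bytes,
        limit.max_bytes,
    )
```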
Some files were not shown because too many files have changed in this diff.