Compare commits

..

1 Commit

Author SHA1 Message Date
dependabot[bot]
f7103be392 Bump gitpython from 3.1.44 to 3.1.45
Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.44 to 3.1.45.
- [Release notes](https://github.com/gitpython-developers/GitPython/releases)
- [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES)
- [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.44...3.1.45)

---
updated-dependencies:
- dependency-name: gitpython
  dependency-version: 3.1.45
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-28 16:15:46 +00:00
384 changed files with 2448 additions and 8613 deletions

View File

@@ -61,7 +61,7 @@ poetry run update_synapse_database --database-config .ci/postgres-config-unporte
echo "+++ Comparing ported schema with unported schema" echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?) # Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;" psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner --restrict-key=TESTING synapse_unported > unported.sql pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner --restrict-key=TESTING synapse > ported.sql pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise # By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff diff -u unported.sql ported.sql | tee schema_diff
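Per the changelog later in this diff, post-CVE-2025-8714 versions of `pg_dump` emit a randomized `\restrict <key>` guard line in each dump, so two dumps of identical schemas would never compare equal; pinning the key with `--restrict-key=TESTING` keeps them byte-comparable. As an illustration only (this helper is an assumption, not part of the repo), the same effect can be had by stripping those lines before diffing:

```python
# Assumption-labelled sketch: not part of this repo; it illustrates why the
# CI script above pins --restrict-key=TESTING.
import re

def strip_restrict_lines(dump: str) -> str:
    """Drop pg_dump's randomized \\restrict/\\unrestrict guard lines so two
    dumps of identical schemas compare equal."""
    return re.sub(r"^\\(un)?restrict .*\n?", "", dump, flags=re.MULTILINE)

# Two dumps that differ only in their random restrict keys:
a = "\\restrict k1\nCREATE TABLE t (i int);\n\\unrestrict k1\n"
b = "\\restrict k2\nCREATE TABLE t (i int);\n\\unrestrict k2\n"
assert strip_restrict_lines(a) == strip_restrict_lines(b)
```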

View File

@@ -31,7 +31,7 @@ jobs:
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Checkout repository
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -95,7 +95,7 @@ jobs:
- build
steps:
- name: Download digests
-uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
path: ${{ runner.temp }}/digests
pattern: digests-*
@@ -123,7 +123,7 @@ jobs:
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
- name: Calculate docker image tag
-uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
+uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ${{ matrix.repository }}
flavor: |

View File

@@ -13,7 +13,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -50,7 +50,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0

View File

@@ -50,7 +50,7 @@ jobs:
needs:
- pre
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0

View File

@@ -18,7 +18,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master

View File

@@ -42,7 +42,7 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
with:
@@ -77,7 +77,7 @@ jobs:
postgres-version: "14"
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -152,7 +152,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -202,7 +202,7 @@ jobs:
steps:
- name: Check out synapse codebase
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse
@@ -234,7 +234,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -16,7 +16,7 @@ jobs:
name: "Check locked dependencies have sdists" name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with: with:
python-version: '3.x' python-version: '3.x'

View File

@@ -33,17 +33,17 @@ jobs:
packages: write
steps:
- name: Checkout specific branch (debug build)
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: github.event_name == 'push'
with:
ref: master
@@ -55,7 +55,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
-uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
+uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |

View File

@@ -27,7 +27,7 @@ jobs:
name: "Calculate list of debian distros" name: "Calculate list of debian distros"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with: with:
python-version: "3.x" python-version: "3.x"
@@ -55,7 +55,7 @@ jobs:
steps:
- name: Checkout
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: src
@@ -66,7 +66,7 @@ jobs:
install: true
- name: Set up docker layer caching
-uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -132,7 +132,7 @@ jobs:
os: "ubuntu-24.04-arm" os: "ubuntu-24.04-arm"
steps: steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with: with:
@@ -165,7 +165,7 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.10"
@@ -191,7 +191,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
-uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.

View File

@@ -14,7 +14,7 @@ jobs:
name: Ensure Synapse config schema is valid
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
@@ -40,7 +40,7 @@ jobs:
name: Ensure generated documentation is up-to-date
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"

View File

@@ -86,7 +86,7 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
with:
@@ -106,7 +106,7 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
@@ -116,7 +116,7 @@ jobs:
check-lockfile:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
@@ -129,7 +129,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,7 +151,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -174,7 +174,7 @@ jobs:
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
-uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: |
.mypy_cache
@@ -187,7 +187,7 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
@@ -195,7 +195,7 @@ jobs:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
@@ -213,7 +213,7 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
@@ -233,7 +233,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -252,7 +252,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -270,7 +270,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -306,7 +306,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -326,7 +326,7 @@ jobs:
needs: changes
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
@@ -376,7 +376,7 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
@@ -397,7 +397,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -453,7 +453,7 @@ jobs:
- changes
runs-on: ubuntu-22.04
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -518,7 +518,7 @@ jobs:
extras: ["all"] extras: ["all"]
steps: steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Install libs necessary for PyPy to build binary wheels for dependencies # Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -568,7 +568,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
@@ -615,7 +615,7 @@ jobs:
--health-retries 5
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -659,7 +659,7 @@ jobs:
--health-retries 5
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
@@ -714,7 +714,7 @@ jobs:
steps:
- name: Checkout synapse codebase
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse
@@ -750,7 +750,7 @@ jobs:
- changes
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -770,7 +770,7 @@ jobs:
- changes
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master

View File

@@ -11,15 +11,11 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
-- uses: actions/add-to-project@c0c5949b017d0d4a39f7ba888255881bdac2a823 # v1.0.2
+- uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
id: add_project
with:
project-url: "https://github.com/orgs/matrix-org/projects/67"
github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
-# This action will error if the issue already exists on the project. Which is
-# common as `X-Needs-Info` will often be added to issues that are already in
-# the triage queue. Prevent the whole job from failing in this case.
-continue-on-error: true
- name: Set status
env:
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}

View File

@@ -43,7 +43,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -70,7 +70,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
@@ -117,7 +117,7 @@ jobs:
- ${{ github.workspace }}:/src
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
@@ -175,7 +175,7 @@ jobs:
steps:
- name: Run actions/checkout@v4 for synapse
-uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: synapse
@@ -217,7 +217,7 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,173 +1,3 @@
# Synapse 1.137.0 (2025-08-26)
No significant changes since 1.137.0rc1.
# Synapse 1.137.0rc1 (2025-08-19)
### Bugfixes
- Fix a bug which could corrupt auth chains making it impossible to perform state resolution. ([\#18746](https://github.com/element-hq/synapse/issues/18746))
- Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`. ([\#18780](https://github.com/element-hq/synapse/issues/18780))
- Allow enabling [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) when the stable Matrix Authentication Service integration is enabled. ([\#18832](https://github.com/element-hq/synapse/issues/18832))
### Improved Documentation
- Include IPv6 networks in `denied-peer-ips` of coturn setup. Contributed by @litetex. ([\#18781](https://github.com/element-hq/synapse/issues/18781))
### Internal Changes
- Update tests to ensure all database tables are emptied when purging a room. ([\#18794](https://github.com/element-hq/synapse/issues/18794))
- Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger. ([\#18815](https://github.com/element-hq/synapse/issues/18815))
- Tag Sliding Sync traces when we `wait_for_events`. ([\#18816](https://github.com/element-hq/synapse/issues/18816))
- Fix `portdb` CI by hardcoding the new `pg_dump` restrict key that was added due to [CVE-2025-8714](https://nvd.nist.gov/vuln/detail/cve-2025-8714). ([\#18824](https://github.com/element-hq/synapse/issues/18824))
### Updates to locked dependencies
* Bump actions/add-to-project from 5b1a254a3546aef88e0a7724a77a623fa2e47c36 to 0c37450c4be3b6a7582b2fb013c9ebfd9c8e9300. ([\#18557](https://github.com/element-hq/synapse/issues/18557))
* Bump actions/cache from 4.2.3 to 4.2.4. ([\#18799](https://github.com/element-hq/synapse/issues/18799))
* Bump actions/checkout from 4.2.2 to 4.3.0. ([\#18800](https://github.com/element-hq/synapse/issues/18800))
* Bump actions/download-artifact from 4.3.0 to 5.0.0. ([\#18801](https://github.com/element-hq/synapse/issues/18801))
* Bump docker/metadata-action from 5.7.0 to 5.8.0. ([\#18773](https://github.com/element-hq/synapse/issues/18773))
* Bump mypy from 1.16.1 to 1.17.1. ([\#18775](https://github.com/element-hq/synapse/issues/18775))
* Bump phonenumbers from 9.0.10 to 9.0.11. ([\#18797](https://github.com/element-hq/synapse/issues/18797))
* Bump pygithub from 2.6.1 to 2.7.0. ([\#18779](https://github.com/element-hq/synapse/issues/18779))
* Bump serde_json from 1.0.141 to 1.0.142. ([\#18776](https://github.com/element-hq/synapse/issues/18776))
* Bump slab from 0.4.10 to 0.4.11. ([\#18809](https://github.com/element-hq/synapse/issues/18809))
* Bump tokio from 1.47.0 to 1.47.1. ([\#18774](https://github.com/element-hq/synapse/issues/18774))
* Bump types-pyyaml from 6.0.12.20250516 to 6.0.12.20250809. ([\#18798](https://github.com/element-hq/synapse/issues/18798))
* Bump types-setuptools from 80.9.0.20250529 to 80.9.0.20250809. ([\#18796](https://github.com/element-hq/synapse/issues/18796))
# Synapse 1.136.0 (2025-08-12)
Note: This release includes the security fixes from `1.135.2` and `1.136.0rc2`, detailed below.
### Bugfixes
- Fix bug introduced in 1.135.2 and 1.136.0rc2 where the [Make Room Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#make-room-admin-api) would not treat a room v12's creator power level as the highest in room. ([\#18805](https://github.com/element-hq/synapse/issues/18805))
# Synapse 1.135.2 (2025-08-11)
This is the Synapse portion of the [Matrix coordinated security release](https://matrix.org/blog/2025/07/security-predisclosure/). This release includes support for [room version](https://spec.matrix.org/v1.15/rooms/) 12 which fixes a number of security vulnerabilities, including [CVE-2025-49090](https://www.cve.org/CVERecord?id=CVE-2025-49090).
The default room version is not changed. Not all clients will support room version 12 immediately, and not all users will be using the latest version of their clients. Large, public rooms are advised to wait a few weeks before upgrading to room version 12 to allow users throughout the Matrix ecosystem to update their clients.
Note: release 1.135.1 was skipped due to issues discovered during the release process.
Two patched Synapse releases are now available:
* `1.135.2`: stable release comprised of `1.135.0` + security patches
* Upgrade to this release **if you are currently running 1.135.0 or below**.
* `1.136.0rc2`: unstable release candidate comprised of `1.136.0rc1` + security patches.
* Upgrade to this release **only if you are on 1.136.0rc1**.
### Bugfixes
- Fix invalidation of storage cache that was broken in 1.135.0. ([\#18786](https://github.com/element-hq/synapse/issues/18786))
### Internal Changes
- Add a parameter to `upgrade_rooms(..)` to allow auto join local users. ([\#82](https://github.com/element-hq/synapse/issues/82))
- Speed up upgrading a room with large numbers of banned users. ([\#18574](https://github.com/element-hq/synapse/issues/18574))
# Synapse 1.136.0rc2 (2025-08-11)
- Update MSC4293 redaction logic for room v12. ([\#80](https://github.com/element-hq/synapse/issues/80))
### Internal Changes
- Add a parameter to `upgrade_rooms(..)` to allow auto join local users. ([\#83](https://github.com/element-hq/synapse/issues/83))
# Synapse 1.136.0rc1 (2025-08-05)
Please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11360) as this release contains changes to MAS support, metrics labels and the module API which may require your attention when upgrading.
### Features
- Add configurable rate limiting for the creation of rooms. ([\#18514](https://github.com/element-hq/synapse/issues/18514))
- Add support for [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) - Redact on Kick/Ban. ([\#18540](https://github.com/element-hq/synapse/issues/18540))
- When admins enable themselves to see soft-failed events, they will also see if the cause is due to the policy server flagging them as spam via `unsigned`. ([\#18585](https://github.com/element-hq/synapse/issues/18585))
- Add ability to configure forward/outbound proxy via homeserver config instead of environment variables. See `http_proxy`, `https_proxy`, `no_proxy_hosts`. ([\#18686](https://github.com/element-hq/synapse/issues/18686))
- Advertise experimental support for [MSC4306](https://github.com/matrix-org/matrix-spec-proposals/pull/4306) (Thread Subscriptions) through `/_matrix/clients/versions` if enabled. ([\#18722](https://github.com/element-hq/synapse/issues/18722))
- Stabilise support for delegating authentication to [Matrix Authentication Service](https://github.com/element-hq/matrix-authentication-service/). ([\#18759](https://github.com/element-hq/synapse/issues/18759))
- Implement the push rules for experimental [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-doc/issues/4306). ([\#18762](https://github.com/element-hq/synapse/issues/18762))
### Bugfixes
- Allow return code 403 (allowed by C2S Spec since v1.2) when fetching profiles via federation. ([\#18696](https://github.com/element-hq/synapse/issues/18696))
- Register the MSC4306 (Thread Subscriptions) endpoints in the CS API when the experimental feature is enabled. ([\#18726](https://github.com/element-hq/synapse/issues/18726))
- Fix a long-standing bug where suspended users could not have server notices sent to them (a 403 was returned to the admin). ([\#18750](https://github.com/element-hq/synapse/issues/18750))
- Fix an issue that could cause logcontexts to be lost on rate-limited requests. Found by @realtyem. ([\#18763](https://github.com/element-hq/synapse/issues/18763))
- Fix invalidation of storage cache that was broken in 1.135.0. ([\#18786](https://github.com/element-hq/synapse/issues/18786))
### Improved Documentation
- Minor improvements to README. ([\#18700](https://github.com/element-hq/synapse/issues/18700))
- Document that there can be multiple workers handling the `receipts` stream. ([\#18760](https://github.com/element-hq/synapse/issues/18760))
- Improve worker documentation for some device paths. ([\#18761](https://github.com/element-hq/synapse/issues/18761))
### Deprecations and Removals
- Deprecate `run_as_background_process` exported as part of the module API interface in favor of `ModuleApi.run_as_background_process`. See [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11360) for more information. ([\#18737](https://github.com/element-hq/synapse/issues/18737))
### Internal Changes
- Add debug logging for HMAC digest verification failures when using the admin API to register users. ([\#18474](https://github.com/element-hq/synapse/issues/18474))
- Speed up upgrading a room with large numbers of banned users. ([\#18574](https://github.com/element-hq/synapse/issues/18574))
- Fix config documentation generation script on Windows by enforcing UTF-8. ([\#18580](https://github.com/element-hq/synapse/issues/18580))
- Refactor cache, background process, `Counter`, `LaterGauge`, `GaugeBucketCollector`, `Histogram`, and `Gauge` metrics to be homeserver-scoped. ([\#18656](https://github.com/element-hq/synapse/issues/18656), [\#18714](https://github.com/element-hq/synapse/issues/18714), [\#18715](https://github.com/element-hq/synapse/issues/18715), [\#18724](https://github.com/element-hq/synapse/issues/18724), [\#18753](https://github.com/element-hq/synapse/issues/18753), [\#18725](https://github.com/element-hq/synapse/issues/18725), [\#18670](https://github.com/element-hq/synapse/issues/18670), [\#18748](https://github.com/element-hq/synapse/issues/18748), [\#18751](https://github.com/element-hq/synapse/issues/18751))
- Reduce database usage in Sliding Sync by not querying for background update completion after the update is known to be complete. ([\#18718](https://github.com/element-hq/synapse/issues/18718))
- Improve order of validation and ratelimiting in room creation. ([\#18723](https://github.com/element-hq/synapse/issues/18723))
- Bump minimum version bound on Twisted to 21.2.0. ([\#18727](https://github.com/element-hq/synapse/issues/18727), [\#18729](https://github.com/element-hq/synapse/issues/18729))
- Use `twisted.internet.testing` module in tests instead of deprecated `twisted.test.proto_helpers`. ([\#18728](https://github.com/element-hq/synapse/issues/18728))
- Remove obsolete `/send_event` replication endpoint. ([\#18730](https://github.com/element-hq/synapse/issues/18730))
- Update metrics linting to be able to handle custom metrics. ([\#18733](https://github.com/element-hq/synapse/issues/18733))
- Work around `twisted.protocols.amp.TooLong` error by reducing logging in some tests. ([\#18736](https://github.com/element-hq/synapse/issues/18736))
- Prevent "Move labelled issues to correct projects" GitHub Actions workflow from failing when an issue is already on the project board. ([\#18755](https://github.com/element-hq/synapse/issues/18755))
- Bump minimum supported Rust version (MSRV) to 1.82.0. Missed in [#18553](https://github.com/element-hq/synapse/pull/18553) (released in Synapse 1.134.0). ([\#18757](https://github.com/element-hq/synapse/issues/18757))
- Make `Clock.sleep(...)` return a coroutine, so that mypy can catch places where we don't await on it. ([\#18772](https://github.com/element-hq/synapse/issues/18772))
- Update implementation of [MSC4306: Thread Subscriptions](https://github.com/matrix-org/matrix-doc/issues/4306) to include automatic subscription conflict prevention as introduced in later drafts. ([\#18756](https://github.com/element-hq/synapse/issues/18756))
### Updates to locked dependencies
* Bump gitpython from 3.1.44 to 3.1.45. ([\#18743](https://github.com/element-hq/synapse/issues/18743))
* Bump mypy-zope from 1.0.12 to 1.0.13. ([\#18744](https://github.com/element-hq/synapse/issues/18744))
* Bump phonenumbers from 9.0.9 to 9.0.10. ([\#18741](https://github.com/element-hq/synapse/issues/18741))
* Bump ruff from 0.12.4 to 0.12.5. ([\#18742](https://github.com/element-hq/synapse/issues/18742))
* Bump sentry-sdk from 2.32.0 to 2.33.2. ([\#18745](https://github.com/element-hq/synapse/issues/18745))
* Bump tokio from 1.46.1 to 1.47.0. ([\#18740](https://github.com/element-hq/synapse/issues/18740))
* Bump types-jsonschema from 4.24.0.20250708 to 4.25.0.20250720. ([\#18703](https://github.com/element-hq/synapse/issues/18703))
* Bump types-psycopg2 from 2.9.21.20250516 to 2.9.21.20250718. ([\#18706](https://github.com/element-hq/synapse/issues/18706))
# Synapse 1.135.0 (2025-08-01)
No significant changes since 1.135.0rc2.
# Synapse 1.135.0rc2 (2025-07-30)
### Bugfixes
- Fix user failing to deactivate with MAS when `/_synapse/mas` is handled by a worker. ([\#18716](https://github.com/element-hq/synapse/issues/18716))
### Internal Changes
- Fix performance regression introduced in [#18238](https://github.com/element-hq/synapse/issues/18238) by adding a cache to `is_server_admin`. ([\#18747](https://github.com/element-hq/synapse/issues/18747))
# Synapse 1.135.0rc1 (2025-07-22)
### Features

578
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -8,7 +8,7 @@
Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
implementation, written and maintained by `Element <https://element.io>`_.
`Matrix <https://github.com/matrix-org>`__ is the open standard for
-secure and interoperable real-time communications. You can directly run
+secure and interoperable real time communications. You can directly run
and manage the source code in this repository, available under an AGPL
license (or alternatively under a commercial license from Element).
There is no support provided by Element unless you have a
@@ -23,13 +23,13 @@ ESS builds on Synapse to offer a complete Matrix-based backend including the ful
`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
giving admins the power to easily manage an organization-wide
deployment. It includes advanced identity management, auditing,
-moderation and data retention options as well as Long-Term Support and
-SLAs. ESS supports any Matrix-compatible client.
+moderation and data retention options as well as Long Term Support and
+SLAs. ESS can be used to support any Matrix-based frontend client.
.. contents::
-🛠️ Installation and configuration
-==================================
+🛠️ Installing and configuration
+===============================
The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
`Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
@@ -133,7 +133,7 @@ connect from a client: see
An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
-and instead specify a homeserver URL of ``https://<server_name>:8448``
+and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/ecosystem/clients/>`_.
@@ -162,15 +162,16 @@ the public internet. Without it, anyone can freely register accounts on your hom
This can be exploited by attackers to create spambots targeting the rest of the Matrix
federation.
-Your new Matrix ID will be formed partly from the ``server_name``, and partly
-from a localpart you specify when you create the account in the form of::
+Your new user name will be formed partly from the ``server_name``, and partly
+from a localpart you specify when you create the account. Your name will take
+the form of::
@localpart:my.domain.name
(pronounced "at localpart on my dot domain dot name").
As when logging in, you will need to specify a "Custom server". Specify your
-desired ``localpart`` in the 'Username' box.
+desired ``localpart`` in the 'User name' box.
🎯 Troubleshooting and support
==============================
@@ -208,10 +209,10 @@ Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.
-**Identity servers do not store accounts or credentials - these are stored and managed on homeservers.
-Identity Servers are just for mapping 3rd Party IDs to Matrix IDs.**
-This process is highly security-sensitive, as there is an obvious risk of spam if it
+**They are not where accounts or credentials are stored - these live on home
+servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**
+This process is very security-sensitive, as there is obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
@@ -237,9 +238,9 @@ email address.
We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://element-hq.github.io/synapse/latest/development/contributing_guide.html>`_.
-This is part of our broader `documentation <https://element-hq.github.io/synapse/latest>`_, which includes
+This is part of our larger `documentation <https://element-hq.github.io/synapse/latest>`_, which includes
information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:
* `Synapse's database schema <https://element-hq.github.io/synapse/latest/development/database_schema.html>`_,

View File

@@ -19,17 +19,17 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
# This flag is a no-op in the latest versions. Instead, we need to
# specify this in the `bdist_wheel` config below.
py_limited_api=True,
-# We always build in release mode, as we can't distinguish
-# between using `poetry` in development vs production.
+# We force always building in release mode, as we can't tell the
+# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False
-# We look up the minimum supported Python version with
-# `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first Python
+# We lookup the minimum supported python version by looking at
+# `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first python
# version that matches. We then convert that into the `py_limited_api` form,
-# e.g. cp39 for Python 3.9.
+# e.g. cp39 for python 3.9.
py_limited_api: str
python_bounds = SpecifierSet(setup_kwargs["python_requires"])
for minor_version in itertools.count(start=8):
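The lookup those comments describe is easier to see in isolation. A minimal standalone sketch (the function name and safety guard are assumptions; the real logic lives in the `build.py` hunk above):

```python
# Standalone sketch of the py_limited_api lookup described in build.py.
import itertools

from packaging.specifiers import SpecifierSet

def min_py_limited_api(python_requires: str) -> str:
    """Return the lowest CPython 3.x allowed by `python_requires` in
    py_limited_api form, e.g. "cp39" for Python 3.9."""
    python_bounds = SpecifierSet(python_requires)
    for minor_version in itertools.count(start=8):
        if minor_version > 50:
            # Guard for this sketch only; the loop in build.py has no such check.
            raise RuntimeError("no supported Python 3.x version found")
        if f"3.{minor_version}.0" in python_bounds:
            return f"cp3{minor_version}"

print(min_py_limited_api(">=3.9.0,<4.0.0"))  # -> cp39
```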

1
changelog.d/18474.misc Normal file
View File

@@ -0,0 +1 @@
Add debug logging for HMAC digest verification failures when using the admin API to register users.

View File

@@ -0,0 +1 @@
Add configurable rate limiting for the creation of rooms.

View File

@@ -0,0 +1 @@
Add support for [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) - Redact on Kick/Ban.

1
changelog.d/18580.misc Normal file
View File

@@ -0,0 +1 @@
Fix config documentation generation script on Windows by enforcing UTF-8.

1
changelog.d/18656.misc Normal file
View File

@@ -0,0 +1 @@
Refactor `Counter` metrics to be homeserver-scoped.

1
changelog.d/18670.misc Normal file
View File

@@ -0,0 +1 @@
Refactor background process metrics to be homeserver-scoped.

View File

@@ -0,0 +1 @@
Add ability to configure forward/outbound proxy via homeserver config instead of environment variables. See `http_proxy`, `https_proxy`, `no_proxy_hosts`.

1
changelog.d/18696.bugfix Normal file
View File

@@ -0,0 +1 @@
Allow return code 403 (allowed by C2S Spec since v1.2) when fetching profiles via federation.

1
changelog.d/18718.misc Normal file
View File

@@ -0,0 +1 @@
Reduce database usage in Sliding Sync by not querying for background update completion after the update is known to be complete.

1
changelog.d/18726.bugfix Normal file
View File

@@ -0,0 +1 @@
Register the MSC4306 endpoints in the CS API when the experimental feature is enabled.

1
changelog.d/18727.misc Normal file
View File

@@ -0,0 +1 @@
Bump minimum version bound on Twisted to 21.2.0.

View File

@@ -4396,7 +4396,7 @@
"exemplar": false, "exemplar": false,
"expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_received_pdu_time[10m]))) / 60", "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_received_pdu_time[10m]))) / 60",
"instant": false, "instant": false,
"legendFormat": "{{origin_server_name}} ", "legendFormat": "{{server_name}} ",
"range": true, "range": true,
"refId": "A" "refId": "A"
} }
@@ -4518,7 +4518,7 @@
"exemplar": false, "exemplar": false,
"expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_sent_pdu_time[10m]))) / 60", "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_sent_pdu_time[10m]))) / 60",
"instant": false, "instant": false,
"legendFormat": "{{destination_server_name}}", "legendFormat": "{{server_name}}",
"range": true, "range": true,
"refId": "A" "refId": "A"
} }

54
debian/changelog vendored
View File

@@ -1,57 +1,3 @@
matrix-synapse-py3 (1.137.0) stable; urgency=medium
* New Synapse release 1.137.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Aug 2025 10:23:41 +0100
matrix-synapse-py3 (1.137.0~rc1) stable; urgency=medium
* New Synapse release 1.137.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Aug 2025 10:55:22 +0100
matrix-synapse-py3 (1.136.0) stable; urgency=medium
* New Synapse release 1.136.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Aug 2025 13:18:03 +0100
matrix-synapse-py3 (1.136.0~rc2) stable; urgency=medium
* New Synapse release 1.136.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Aug 2025 12:18:52 -0600
matrix-synapse-py3 (1.136.0~rc1) stable; urgency=medium
* New Synapse release 1.136.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Aug 2025 08:13:30 -0600
matrix-synapse-py3 (1.135.2) stable; urgency=medium
* New Synapse release 1.135.2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Aug 2025 11:52:01 -0600
matrix-synapse-py3 (1.135.1) stable; urgency=medium
* New Synapse release 1.135.1.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Aug 2025 11:13:15 -0600
matrix-synapse-py3 (1.135.0) stable; urgency=medium
* New Synapse release 1.135.0.
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Aug 2025 13:12:28 +0100
matrix-synapse-py3 (1.135.0~rc2) stable; urgency=medium
* New Synapse release 1.135.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 30 Jul 2025 12:19:14 +0100
matrix-synapse-py3 (1.135.0~rc1) stable; urgency=medium
* New Synapse release 1.135.0rc1.

View File

@@ -178,7 +178,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/login$", "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$", "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$", "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/deactivate$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/devices(/|$)", "^/_matrix/client/(api/v1|r0|v3|unstable)/devices(/|$)",
"^/_matrix/client/(r0|v3)/delete_devices$", "^/_matrix/client/(r0|v3)/delete_devices$",
"^/_matrix/client/versions$", "^/_matrix/client/versions$",

View File

@@ -22,46 +22,4 @@ To receive soft failed events in APIs like `/sync` and `/messages`, set `return_
to `true` in the admin client config. When `false`, the normal behaviour of these endpoints is to to `true` in the admin client config. When `false`, the normal behaviour of these endpoints is to
exclude soft failed events. exclude soft failed events.
**Note**: If the policy server flagged the event as spam and that caused soft failure, that will be indicated
in the event's `unsigned` content like so:
```json
{
"type": "m.room.message",
"other": "event_fields_go_here",
"unsigned": {
"io.element.synapse.soft_failed": true,
"io.element.synapse.policy_server_spammy": true
}
}
```
Default: `false` Default: `false`
## See events marked spammy by policy servers
Learn more about policy servers from [MSC4284](https://github.com/matrix-org/matrix-spec-proposals/pull/4284).
Similar to `return_soft_failed_events`, clients logged in with admin accounts can see events which were
flagged by the policy server as spammy (and thus soft failed) by setting `return_policy_server_spammy_events`
to `true`.
`return_policy_server_spammy_events` may be `true` while `return_soft_failed_events` is `false` to only see
policy server-flagged events. When `return_soft_failed_events` is `true` however, `return_policy_server_spammy_events`
is always `true`.
Events which were flagged by the policy server are marked with `io.element.synapse.policy_server_spammy` in the
event's `unsigned` content, like so:
```json
{
"type": "m.room.message",
"other": "event_fields_go_here",
"unsigned": {
"io.element.synapse.soft_failed": true,
"io.element.synapse.policy_server_spammy": true
}
}
```
Default: `true` if `return_soft_failed_events` is `true`, otherwise `false`
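As a concrete illustration (an editor's sketch, not part of the original documentation), an admin client that has opted in via these config flags could scan a `/sync` response for flagged events; the homeserver URL and access token below are placeholders:
```python
import requests

# Placeholder homeserver and admin access token.
HOMESERVER = "https://matrix.example.org"
ACCESS_TOKEN = "syt_placeholder_token"

def find_policy_server_spammy(sync_response: dict) -> list:
    """Collect timeline events whose `unsigned` content carries the
    `io.element.synapse.policy_server_spammy` flag."""
    flagged = []
    for room in sync_response.get("rooms", {}).get("join", {}).values():
        for event in room.get("timeline", {}).get("events", []):
            unsigned = event.get("unsigned", {})
            if unsigned.get("io.element.synapse.policy_server_spammy"):
                flagged.append(event)
    return flagged

# Assumes `return_policy_server_spammy_events` was enabled as described above;
# otherwise flagged events are simply absent from the response.
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v3/sync",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=30,
)
for event in find_policy_server_spammy(resp.json()):
    print(event["event_id"], event.get("sender"))
```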

View File

@@ -88,8 +88,7 @@ This will install and start a systemd service called `coturn`.
denied-peer-ip=172.16.0.0-172.31.255.255
# recommended additional local peers to block, to mitigate external access to internal services.
# https://www.enablesecurity.com/blog/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
# https://www.enablesecurity.com/blog/cve-2020-26262-bypass-of-coturns-access-control-protection/#further-concerns-what-else
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
@@ -102,14 +101,6 @@ This will install and start a systemd service called `coturn`.
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255
denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
# special case the turn server itself so that client->TURN->TURN->client flows work
# this should be one of the turn server's listening IPs

View File

@@ -117,77 +117,6 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.136.0
## Deprecate `run_as_background_process` exported as part of the module API interface in favor of `ModuleApi.run_as_background_process`
The `run_as_background_process` function is now a method of the `ModuleApi` class. If
you were using the function directly from the module API, it will continue to work fine
but the background process metrics will not include an accurate `server_name` label.
This kind of metric labeling isn't relevant for many use cases and is used to
differentiate Synapse instances running in the same Python process (relevant to Synapse
Pro: Small Hosts). We recommend switching to the new
`ModuleApi.run_as_background_process` method to stay ahead of future changes.
<details>
<summary>Example <code>run_as_background_process</code> upgrade</summary>
Before:
```python
class MyModule:
def __init__(self, module_api: ModuleApi) -> None:
run_as_background_process(__name__ + ":setup_database", self.setup_database)
```
After:
```python
class MyModule:
def __init__(self, module_api: ModuleApi) -> None:
module_api.run_as_background_process(__name__ + ":setup_database", self.setup_database)
```
</details>
## Metric labels have changed on `synapse_federation_last_received_pdu_time` and `synapse_federation_last_sent_pdu_time`
Previously, the `synapse_federation_last_received_pdu_time` and
`synapse_federation_last_sent_pdu_time` metrics both used the `server_name` label to
differentiate between different servers that we send and receive events from.
Since we're now using the `server_name` label to differentiate between different Synapse
homeserver instances running in the same process, these metrics have been changed as follows:
- `synapse_federation_last_received_pdu_time` now uses the `origin_server_name` label
- `synapse_federation_last_sent_pdu_time` now uses the `destination_server_name` label
The Grafana dashboard JSON in `contrib/grafana/synapse.json` has been updated to reflect
this change but you will need to manually update your own existing Grafana dashboards
using these metrics.
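To make the rename concrete, here is a short sketch (an editor's illustration, not from the upgrade notes) that reads the renamed metrics through the standard Prometheus HTTP API; the Prometheus URL and the `matrix.org` server name are placeholders:
```python
import requests

# Placeholder Prometheus instance that scrapes this Synapse deployment.
PROMETHEUS = "http://localhost:9090"

# `server_name` no longer identifies the remote server on these metrics:
# received PDUs are keyed by `origin_server_name`, sent PDUs by
# `destination_server_name`.
QUERIES = [
    'synapse_federation_last_received_pdu_time{origin_server_name="matrix.org"}',
    'synapse_federation_last_sent_pdu_time{destination_server_name="matrix.org"}',
]

for query in QUERIES:
    data = requests.get(
        f"{PROMETHEUS}/api/v1/query", params={"query": query}, timeout=10
    ).json()
    for sample in data["data"]["result"]:
        # Each sample pairs a label set with a [timestamp, value] tuple.
        print(sample["metric"], sample["value"])
```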
## Stable integration with Matrix Authentication Service
Support for [Matrix Authentication Service (MAS)](https://github.com/element-hq/matrix-authentication-service) is now stable, with a simplified configuration.
This stable integration requires MAS 0.20.0 or later.
The existing `experimental_features.msc3861` configuration option is now deprecated and will be removed in Synapse v1.137.0.
Synapse deployments already using MAS should now use the new configuration options:
```yaml
matrix_authentication_service:
# Enable the MAS integration
enabled: true
# The base URL where Synapse will contact MAS
endpoint: http://localhost:8080
# The shared secret used to authenticate MAS requests, must be the same as `matrix.secret` in the MAS configuration
# See https://element-hq.github.io/matrix-authentication-service/reference/configuration.html#matrix
secret: "asecurerandomsecretstring"
```
Such deployments must remove the `experimental_features.msc3861` option from their configuration.
They can also remove the client previously used by Synapse [in the MAS configuration](https://element-hq.github.io/matrix-authentication-service/reference/configuration.html#clients) as it is no longer in use.
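One way to sanity-check the new settings before restarting Synapse is to fetch the discovery document that the integration relies on. This is an editor's sketch, assuming MAS serves standard OIDC discovery metadata under the configured endpoint:
```python
import requests

# Should match `matrix_authentication_service.endpoint` in homeserver.yaml.
MAS_ENDPOINT = "http://localhost:8080"

# Synapse requires the `discovery` resource to be mounted at this endpoint,
# so this document must be reachable from the homeserver.
discovery = requests.get(
    f"{MAS_ENDPOINT}/.well-known/openid-configuration", timeout=10
).json()

print("issuer:", discovery["issuer"])
print("introspection endpoint:", discovery.get("introspection_endpoint"))
```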
# Upgrading to v1.135.0
## `on_user_registration` module API callback may now run on any worker

View File

@@ -643,28 +643,6 @@ no_proxy_hosts:
- 172.30.0.0/16
```
---
### `matrix_authentication_service`
*(object)* The `matrix_authentication_service` setting configures integration with [Matrix Authentication Service (MAS)](https://github.com/element-hq/matrix-authentication-service).
This setting has the following sub-options:
* `enabled` (boolean): Whether or not to enable the MAS integration. If this is set to `false`, Synapse will use its legacy internal authentication API. Defaults to `false`.
* `endpoint` (string): The URL where Synapse can reach MAS. This *must* have the `discovery` and `oauth` resources mounted. Defaults to `"http://localhost:8080"`.
* `secret` (string|null): A shared secret that will be used to authenticate requests from and to MAS.
* `secret_path` (string|null): Alternative to `secret`, reading the shared secret from a file. The file should be a plain text file, containing only the secret. Synapse reads the secret from the given file once at startup.
Example configuration:
```yaml
matrix_authentication_service:
enabled: true
secret: someverysecuresecret
endpoint: http://localhost:8080
```
---
### `dummy_events_threshold`
*(integer)* Forward extremities can build up in a room due to networking delays between homeservers. Once this happens in a large room, calculation of the state of that room can become quite expensive. To mitigate this, once the number of forward extremities reaches a given threshold, Synapse will send an `org.matrix.dummy_event` event, which will reduce the forward extremities in the room.
@@ -4174,7 +4152,7 @@ The default power levels for each preset are:
"m.room.history_visibility": 100 "m.room.history_visibility": 100
"m.room.canonical_alias": 50 "m.room.canonical_alias": 50
"m.room.avatar": 50 "m.room.avatar": 50
"m.room.tombstone": 100 (150 if MSC4289 is used) "m.room.tombstone": 100
"m.room.server_acl": 100 "m.room.server_acl": 100
"m.room.encryption": 100 "m.room.encryption": 100
``` ```

View File

@@ -238,7 +238,6 @@ information.
^/_matrix/client/unstable/im.nheko.summary/summary/.*$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
^/_matrix/client/(r0|v3|unstable)/account/deactivate$
^/_matrix/client/(r0|v3)/delete_devices$
^/_matrix/client/(api/v1|r0|v3|unstable)/devices(/|$)
^/_matrix/client/versions$
@@ -260,7 +259,7 @@ information.
^/_matrix/client/(r0|v3|unstable)/keys/claim$
^/_matrix/client/(r0|v3|unstable)/room_keys/
^/_matrix/client/(r0|v3|unstable)/keys/upload
^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$ ^/_matrix/client/(api/v1|r0|v3|unstable/keys/device_signing/upload$
^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$
# Registration/login requests
@@ -532,9 +531,8 @@ the stream writer for the `account_data` stream:
##### The `receipts` stream
The `receipts` stream supports multiple writers. The following endpoints The following endpoints should be routed directly to the worker configured as
can be handled by any worker, but should be routed directly to one of the workers the stream writer for the `receipts` stream:
configured as stream writer for the `receipts` stream:
^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
@@ -556,13 +554,13 @@ the stream writer for the `push_rules` stream:
##### The `device_lists` stream
The `device_lists` stream supports multiple writers. The following endpoints
can be handled by any worker, but should be routed directly to one of the workers can be handled by any worker, but should be routed directly one of the workers
configured as stream writer for the `device_lists` stream:
^/_matrix/client/(r0|v3)/delete_devices$
^/_matrix/client/(api/v1|r0|v3|unstable)/devices(/|$) ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/
^/_matrix/client/(r0|v3|unstable)/keys/upload
^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$ ^/_matrix/client/(api/v1|r0|v3|unstable/keys/device_signing/upload$
^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$
#### Restrict outbound federation traffic to a specific set of workers

View File

@@ -1,17 +1,6 @@
[mypy]
namespace_packages = True
# Our custom mypy plugin should remain first in this list. plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
#
# mypy has a limitation where it only chooses the first plugin that returns a non-None
# value for each hook (known-limitation, c.f.
# https://github.com/python/mypy/issues/19524). We workaround this by putting our custom
# plugin first in the plugin order and then manually calling any other conflicting
# plugin hooks in our own plugin followed by our own checks.
#
# If you add a new plugin, make sure to check whether the hooks being used conflict with
# our custom plugin hooks and if so, manually call the other plugin's hooks in our
# custom plugin. (also applies to if the plugin is updated in the future)
plugins = scripts-dev/mypy_synapse_plugin.py, pydantic.mypy, mypy_zope:plugin
follow_imports = normal
show_error_codes = True
show_traceback = True
@@ -110,6 +99,3 @@ ignore_missing_imports = True
[mypy-multipart.*]
ignore_missing_imports = True
[mypy-mypy_zope.*]
ignore_missing_imports = True

poetry.lock generated
View File

@@ -441,6 +441,24 @@ files = [
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
] ]
[[package]]
name = "deprecated"
version = "1.2.13"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
groups = ["dev"]
files = [
{file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"},
{file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"},
]
[package.dependencies]
wrapt = ">=1.10,<2"
[package.extras]
dev = ["PyTest (<5) ; python_version < \"3.6\"", "PyTest ; python_version >= \"3.6\"", "PyTest-Cov (<2.6) ; python_version < \"3.6\"", "PyTest-Cov ; python_version >= \"3.6\"", "bump2version (<1)", "configparser (<5) ; python_version < \"3\"", "importlib-metadata (<3) ; python_version < \"3\"", "importlib-resources (<4) ; python_version < \"3\"", "sphinx (<2)", "sphinxcontrib-websupport (<2) ; python_version < \"3\"", "tox", "zipp (<2) ; python_version < \"3\""]
[[package]]
name = "docutils"
version = "0.19"
@@ -1369,50 +1387,44 @@ docs = ["sphinx (>=8,<9)", "sphinx-autobuild"]
[[package]]
name = "mypy"
version = "1.17.1" version = "1.16.1"
description = "Optional static typing for Python" description = "Optional static typing for Python"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972"}, {file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"},
{file = "mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7"}, {file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"},
{file = "mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df"}, {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7469ee5902c95542bea7ee545f7006508c65c8c54b06dc2c92676ce526f3ea"},
{file = "mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390"}, {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:352025753ef6a83cb9e7f2427319bb7875d1fdda8439d1e23de12ab164179574"},
{file = "mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94"}, {file = "mypy-1.16.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff9fa5b16e4c1364eb89a4d16bcda9987f05d39604e1e6c35378a2987c1aac2d"},
{file = "mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b"}, {file = "mypy-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:1256688e284632382f8f3b9e2123df7d279f603c561f099758e66dd6ed4e8bd6"},
{file = "mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58"}, {file = "mypy-1.16.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:472e4e4c100062488ec643f6162dd0d5208e33e2f34544e1fc931372e806c0cc"},
{file = "mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5"}, {file = "mypy-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea16e2a7d2714277e349e24d19a782a663a34ed60864006e8585db08f8ad1782"},
{file = "mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd"}, {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08e850ea22adc4d8a4014651575567b0318ede51e8e9fe7a68f25391af699507"},
{file = "mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b"}, {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22d76a63a42619bfb90122889b903519149879ddbf2ba4251834727944c8baca"},
{file = "mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5"}, {file = "mypy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2c7ce0662b6b9dc8f4ed86eb7a5d505ee3298c04b40ec13b30e572c0e5ae17c4"},
{file = "mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b"}, {file = "mypy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:211287e98e05352a2e1d4e8759c5490925a7c784ddc84207f4714822f8cf99b6"},
{file = "mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb"}, {file = "mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d"},
{file = "mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403"}, {file = "mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9"},
{file = "mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056"}, {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79"},
{file = "mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341"}, {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15"},
{file = "mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb"}, {file = "mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd"},
{file = "mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19"}, {file = "mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b"},
{file = "mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7"}, {file = "mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438"},
{file = "mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81"}, {file = "mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536"},
{file = "mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6"}, {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f"},
{file = "mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849"}, {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359"},
{file = "mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14"}, {file = "mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be"},
{file = "mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a"}, {file = "mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee"},
{file = "mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733"}, {file = "mypy-1.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fc688329af6a287567f45cc1cefb9db662defeb14625213a5b7da6e692e2069"},
{file = "mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd"}, {file = "mypy-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e198ab3f55924c03ead626ff424cad1732d0d391478dfbf7bb97b34602395da"},
{file = "mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0"}, {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09aa4f91ada245f0a45dbc47e548fd94e0dd5a8433e0114917dc3b526912a30c"},
{file = "mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a"}, {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13c7cd5b1cb2909aa318a90fd1b7e31f17c50b242953e7dd58345b2a814f6383"},
{file = "mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91"}, {file = "mypy-1.16.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:58e07fb958bc5d752a280da0e890c538f1515b79a65757bbdc54252ba82e0b40"},
{file = "mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed"}, {file = "mypy-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:f895078594d918f93337a505f8add9bd654d1a24962b4c6ed9390e12531eb31b"},
{file = "mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9"}, {file = "mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37"},
{file = "mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99"}, {file = "mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab"},
{file = "mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8"},
{file = "mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8"},
{file = "mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259"},
{file = "mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d"},
{file = "mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9"},
{file = "mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01"},
]
[package.dependencies]
@@ -1442,18 +1454,18 @@ files = [
[[package]]
name = "mypy-zope"
version = "1.0.13" version = "1.0.12"
description = "Plugin for mypy to support zope interfaces" description = "Plugin for mypy to support zope interfaces"
optional = false optional = false
python-versions = "*" python-versions = "*"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "mypy_zope-1.0.13-py3-none-any.whl", hash = "sha256:13740c4cbc910cca2c143c6709e1c483c991abeeeb7b629ad6f73d8ac1edad15"}, {file = "mypy_zope-1.0.12-py3-none-any.whl", hash = "sha256:f2ecf169f886fbc266e9339db0c2f3818528a7536b9bb4f5ece1d5854dc2f27c"},
{file = "mypy_zope-1.0.13.tar.gz", hash = "sha256:63fb4d035ea874baf280dc69e714dcde4bd2a4a4837a0fd8d90ce91bea510f99"}, {file = "mypy_zope-1.0.12.tar.gz", hash = "sha256:d6f8f99eb5644885553b4ec7afc8d68f5daf412c9bf238ec3c36b65d97df6cbe"},
]
[package.dependencies]
mypy = ">=1.0.0,<1.18.0" mypy = ">=1.0.0,<1.17.0"
"zope.interface" = "*" "zope.interface" = "*"
"zope.schema" = "*" "zope.schema" = "*"
@@ -1531,14 +1543,14 @@ files = [
[[package]]
name = "phonenumbers"
version = "9.0.11" version = "9.0.9"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false optional = false
python-versions = "*" python-versions = "*"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "phonenumbers-9.0.11-py2.py3-none-any.whl", hash = "sha256:a8ebb2136f1f14dfdbadb98be01cb71b96f880dea011eb5e0921967fe3a23abf"}, {file = "phonenumbers-9.0.9-py2.py3-none-any.whl", hash = "sha256:13b91aa153f87675902829b38a556bad54824f9c121b89588bbb5fa8550d97ef"},
{file = "phonenumbers-9.0.11.tar.gz", hash = "sha256:6573858dcf0a7a2753a071375e154d9fc11791546c699b575af95d2ba7d84a1d"}, {file = "phonenumbers-9.0.9.tar.gz", hash = "sha256:c640545019a07e68b0bea57a5fede6eef45c7391165d28935f45615f9a567a5b"},
]
[[package]]
@@ -1908,21 +1920,22 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygithub"
version = "2.7.0" version = "2.6.1"
description = "Use the full Github API v3" description = "Use the full Github API v3"
optional = false optional = false
python-versions = ">=3.8" python-versions = ">=3.8"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "pygithub-2.7.0-py3-none-any.whl", hash = "sha256:40ecbfe26dc55cc34ab4b0ffa1d455e6f816ef9a2bc8d6f5ad18ce572f163700"}, {file = "PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3"},
{file = "pygithub-2.7.0.tar.gz", hash = "sha256:7cd6eafabb09b5369afba3586d86b1f1ad6f1326d2ff01bc47bb26615dce4cbb"}, {file = "pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf"},
]
[package.dependencies]
Deprecated = "*"
pyjwt = {version = ">=2.4.0", extras = ["crypto"]} pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
pynacl = ">=1.4.0" pynacl = ">=1.4.0"
requests = ">=2.14.0" requests = ">=2.14.0"
typing-extensions = ">=4.5.0" typing-extensions = ">=4.0.0"
urllib3 = ">=1.26.0" urllib3 = ">=1.26.0"
[[package]] [[package]]
@@ -2396,30 +2409,30 @@ files = [
[[package]]
name = "ruff"
version = "0.12.7" version = "0.12.4"
description = "An extremely fast Python linter and code formatter, written in Rust." description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false optional = false
python-versions = ">=3.7" python-versions = ">=3.7"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "ruff-0.12.7-py3-none-linux_armv6l.whl", hash = "sha256:76e4f31529899b8c434c3c1dede98c4483b89590e15fb49f2d46183801565303"}, {file = "ruff-0.12.4-py3-none-linux_armv6l.whl", hash = "sha256:cb0d261dac457ab939aeb247e804125a5d521b21adf27e721895b0d3f83a0d0a"},
{file = "ruff-0.12.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:789b7a03e72507c54fb3ba6209e4bb36517b90f1a3569ea17084e3fd295500fb"}, {file = "ruff-0.12.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:55c0f4ca9769408d9b9bac530c30d3e66490bd2beb2d3dae3e4128a1f05c7442"},
{file = "ruff-0.12.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e1c2a3b8626339bb6369116e7030a4cf194ea48f49b64bb505732a7fce4f4e3"}, {file = "ruff-0.12.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a8224cc3722c9ad9044da7f89c4c1ec452aef2cfe3904365025dd2f51daeae0e"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32dec41817623d388e645612ec70d5757a6d9c035f3744a52c7b195a57e03860"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9949d01d64fa3672449a51ddb5d7548b33e130240ad418884ee6efa7a229586"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47ef751f722053a5df5fa48d412dbb54d41ab9b17875c6840a58ec63ff0c247c"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:be0593c69df9ad1465e8a2d10e3defd111fdb62dcd5be23ae2c06da77e8fcffb"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a828a5fc25a3efd3e1ff7b241fd392686c9386f20e5ac90aa9234a5faa12c423"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7dea966bcb55d4ecc4cc3270bccb6f87a337326c9dcd3c07d5b97000dbff41c"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5726f59b171111fa6a69d82aef48f00b56598b03a22f0f4170664ff4d8298efb"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:afcfa3ab5ab5dd0e1c39bf286d829e042a15e966b3726eea79528e2e24d8371a"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74e6f5c04c4dd4aba223f4fe6e7104f79e0eebf7d307e4f9b18c18362124bccd"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c057ce464b1413c926cdb203a0f858cd52f3e73dcb3270a3318d1630f6395bb3"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0bfe4e77fba61bf2ccadf8cf005d6133e3ce08793bbe870dd1c734f2699a3e"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e64b90d1122dc2713330350626b10d60818930819623abbb56535c6466cce045"},
{file = "ruff-0.12.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06bfb01e1623bf7f59ea749a841da56f8f653d641bfd046edee32ede7ff6c606"}, {file = "ruff-0.12.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2abc48f3d9667fdc74022380b5c745873499ff827393a636f7a59da1515e7c57"},
{file = "ruff-0.12.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e41df94a957d50083fd09b916d6e89e497246698c3f3d5c681c8b3e7b9bb4ac8"}, {file = "ruff-0.12.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2b2449dc0c138d877d629bea151bee8c0ae3b8e9c43f5fcaafcd0c0d0726b184"},
{file = "ruff-0.12.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4000623300563c709458d0ce170c3d0d788c23a058912f28bbadc6f905d67afa"}, {file = "ruff-0.12.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:56e45bb11f625db55f9b70477062e6a1a04d53628eda7784dce6e0f55fd549eb"},
{file = "ruff-0.12.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:69ffe0e5f9b2cf2b8e289a3f8945b402a1b19eff24ec389f45f23c42a3dd6fb5"}, {file = "ruff-0.12.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:478fccdb82ca148a98a9ff43658944f7ab5ec41c3c49d77cd99d44da019371a1"},
{file = "ruff-0.12.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a07a5c8ffa2611a52732bdc67bf88e243abd84fe2d7f6daef3826b59abbfeda4"}, {file = "ruff-0.12.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0fc426bec2e4e5f4c4f182b9d2ce6a75c85ba9bcdbe5c6f2a74fcb8df437df4b"},
{file = "ruff-0.12.7-py3-none-win32.whl", hash = "sha256:c928f1b2ec59fb77dfdf70e0419408898b63998789cc98197e15f560b9e77f77"}, {file = "ruff-0.12.4-py3-none-win32.whl", hash = "sha256:4de27977827893cdfb1211d42d84bc180fceb7b72471104671c59be37041cf93"},
{file = "ruff-0.12.7-py3-none-win_amd64.whl", hash = "sha256:9c18f3d707ee9edf89da76131956aba1270c6348bfee8f6c647de841eac7194f"}, {file = "ruff-0.12.4-py3-none-win_amd64.whl", hash = "sha256:fe0b9e9eb23736b453143d72d2ceca5db323963330d5b7859d60d101147d461a"},
{file = "ruff-0.12.7-py3-none-win_arm64.whl", hash = "sha256:dfce05101dbd11833a0776716d5d1578641b7fddb537fe7fa956ab85d1769b69"}, {file = "ruff-0.12.4-py3-none-win_arm64.whl", hash = "sha256:0618ec4442a83ab545e5b71202a5c0ed7791e8471435b94e655b570a5031a98e"},
{file = "ruff-0.12.7.tar.gz", hash = "sha256:1fc3193f238bc2d7968772c82831a4ff69252f673be371fb49663f0068b7ec71"}, {file = "ruff-0.12.4.tar.gz", hash = "sha256:13efa16df6c6eeb7d0f091abae50f58e9522f3843edb40d56ad52a5a4a4b6873"},
]
[[package]]
@@ -2457,15 +2470,15 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.34.1" version = "2.32.0"
description = "Python client for Sentry (https://sentry.io)" description = "Python client for Sentry (https://sentry.io)"
optional = true optional = true
python-versions = ">=3.6" python-versions = ">=3.6"
groups = ["main"] groups = ["main"]
markers = "extra == \"all\" or extra == \"sentry\"" markers = "extra == \"all\" or extra == \"sentry\""
files = [ files = [
{file = "sentry_sdk-2.34.1-py2.py3-none-any.whl", hash = "sha256:b7a072e1cdc5abc48101d5146e1ae680fa81fe886d8d95aaa25a0b450c818d32"}, {file = "sentry_sdk-2.32.0-py2.py3-none-any.whl", hash = "sha256:6cf51521b099562d7ce3606da928c473643abe99b00ce4cb5626ea735f4ec345"},
{file = "sentry_sdk-2.34.1.tar.gz", hash = "sha256:69274eb8c5c38562a544c3e9f68b5be0a43be4b697f5fd385bf98e4fbe672687"}, {file = "sentry_sdk-2.32.0.tar.gz", hash = "sha256:9016c75d9316b0f6921ac14c8cd4fb938f26002430ac5be9945ab280f78bec6b"},
]
[package.dependencies]
@@ -2998,14 +3011,14 @@ types-cffi = "*"
[[package]]
name = "types-pyyaml"
version = "6.0.12.20250809" version = "6.0.12.20250516"
description = "Typing stubs for PyYAML" description = "Typing stubs for PyYAML"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "types_pyyaml-6.0.12.20250809-py3-none-any.whl", hash = "sha256:032b6003b798e7de1a1ddfeefee32fac6486bdfe4845e0ae0e7fb3ee4512b52f"}, {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"},
{file = "types_pyyaml-6.0.12.20250809.tar.gz", hash = "sha256:af4a1aca028f18e75297da2ee0da465f799627370d74073e96fee876524f61b5"}, {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"},
]
[[package]]
@@ -3025,14 +3038,14 @@ urllib3 = ">=2"
[[package]]
name = "types-setuptools"
version = "80.9.0.20250809" version = "80.9.0.20250529"
description = "Typing stubs for setuptools" description = "Typing stubs for setuptools"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["dev"] groups = ["dev"]
files = [ files = [
{file = "types_setuptools-80.9.0.20250809-py3-none-any.whl", hash = "sha256:7c6539b4c7ac7b4ab4db2be66d8a58fb1e28affa3ee3834be48acafd94f5976a"}, {file = "types_setuptools-80.9.0.20250529-py3-none-any.whl", hash = "sha256:00dfcedd73e333a430e10db096e4d46af93faf9314f832f13b6bbe3d6757e95f"},
{file = "types_setuptools-80.9.0.20250809.tar.gz", hash = "sha256:e986ba37ffde364073d76189e1d79d9928fb6f5278c7d07589cde353d0218864"}, {file = "types_setuptools-80.9.0.20250529.tar.gz", hash = "sha256:79e088ba0cba2186c8d6499cbd3e143abb142d28a44b042c28d3148b1e353c91"},
]
[[package]]
@@ -3104,6 +3117,91 @@ files = [
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
] ]
[[package]]
name = "wrapt"
version = "1.15.0"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
groups = ["dev"]
files = [
{file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
{file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
{file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
{file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
{file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
{file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
{file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
{file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
{file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
{file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
{file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
{file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
{file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
{file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
{file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
{file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
{file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
{file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
{file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
{file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
{file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
{file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
{file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
{file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
{file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
{file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
{file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
{file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
{file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
{file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
{file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
{file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
{file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
{file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
{file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
{file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
{file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
{file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
{file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
{file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
{file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
{file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
{file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
{file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
{file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
{file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
{file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
{file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
{file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
{file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
{file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
{file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
{file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
{file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
{file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
{file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
{file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
{file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
{file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
{file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
{file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
{file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
{file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
{file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
{file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
{file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
{file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
{file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
{file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
{file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
{file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
{file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
{file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
{file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
{file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
]
[[package]]
name = "xmlschema"
version = "2.4.0"
@@ -3255,4 +3353,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = "^3.9.0"
content-hash = "600a349d08dde732df251583094a121b5385eb43ae0c6ceff10dcf9749359446" content-hash = "d2560fb09c99bf87690749ad902753cfa3f3063bd14cd9d0c0f37ca9e89a7757"

View File

@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.137.0" version = "1.135.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol" description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later" license = "AGPL-3.0-or-later"
@@ -324,7 +324,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.12.7" ruff = "0.12.4"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

View File

@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
rust-version = "1.82.0" rust-version = "1.81.0"
[lib]
name = "synapse"

View File

@@ -61,7 +61,6 @@ fn bench_match_exact(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
@@ -72,10 +71,10 @@ fn bench_match_exact(b: &mut Bencher) {
},
));
let matched = eval.match_condition(&condition, None, None, None).unwrap(); let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(matched, "Didn't match");
b.iter(|| eval.match_condition(&condition, None, None, None).unwrap()); b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
@@ -108,7 +107,6 @@ fn bench_match_word(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
@@ -119,10 +117,10 @@ fn bench_match_word(b: &mut Bencher) {
},
));
let matched = eval.match_condition(&condition, None, None, None).unwrap(); let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(matched, "Didn't match");
b.iter(|| eval.match_condition(&condition, None, None, None).unwrap()); b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
@@ -155,7 +153,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
@@ -166,10 +163,10 @@ fn bench_match_word_miss(b: &mut Bencher) {
},
));
let matched = eval.match_condition(&condition, None, None, None).unwrap(); let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(!matched, "Didn't match");
b.iter(|| eval.match_condition(&condition, None, None, None).unwrap()); b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
@@ -202,7 +199,6 @@ fn bench_eval_message(b: &mut Bencher) {
vec![],
false,
false,
false,
)
.unwrap();
@@ -214,8 +210,7 @@ fn bench_eval_message(b: &mut Bencher) {
false,
false,
false,
false,
);
b.iter(|| eval.run(&rules, Some("bob"), Some("person"), None)); b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
}

View File

@@ -54,7 +54,6 @@ enum EventInternalMetadataData {
RecheckRedaction(bool),
SoftFailed(bool),
ProactivelySend(bool),
PolicyServerSpammy(bool),
Redacted(bool),
TxnId(Box<str>),
TokenId(i64),
@@ -97,13 +96,6 @@ impl EventInternalMetadataData {
.to_owned()
.into_any(),
),
EventInternalMetadataData::PolicyServerSpammy(o) => (
pyo3::intern!(py, "policy_server_spammy"),
o.into_pyobject(py)
.unwrap_infallible()
.to_owned()
.into_any(),
),
EventInternalMetadataData::Redacted(o) => (
pyo3::intern!(py, "redacted"),
o.into_pyobject(py)
@@ -163,11 +155,6 @@ impl EventInternalMetadataData {
.extract()
.with_context(|| format!("'{key_str}' has invalid type"))?,
),
"policy_server_spammy" => EventInternalMetadataData::PolicyServerSpammy(
value
.extract()
.with_context(|| format!("'{key_str}' has invalid type"))?,
),
"redacted" => EventInternalMetadataData::Redacted( "redacted" => EventInternalMetadataData::Redacted(
value value
.extract() .extract()
@@ -440,17 +427,6 @@ impl EventInternalMetadata {
set_property!(self, ProactivelySend, obj);
}
#[getter]
fn get_policy_server_spammy(&self) -> PyResult<bool> {
Ok(get_property_opt!(self, PolicyServerSpammy)
.copied()
.unwrap_or(false))
}
#[setter]
fn set_policy_server_spammy(&mut self, obj: bool) {
set_property!(self, PolicyServerSpammy, obj);
}
#[getter]
fn get_redacted(&self) -> PyResult<bool> {
let bool = get_property!(self, Redacted)?;

View File

@@ -290,26 +290,6 @@ pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
}];
pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.unsubscribed_thread"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(
KnownCondition::Msc4306ThreadSubscription { subscribed: false },
)]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/content/.io.element.msc4306.rule.subscribed_thread"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(
KnownCondition::Msc4306ThreadSubscription { subscribed: true },
)]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
priority_class: 1,

View File

@@ -106,11 +106,8 @@ pub struct PushRuleEvaluator {
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
/// If MSC4210 (remove legacy mentions) is enabled. // If MSC4210 (remove legacy mentions) is enabled.
msc4210_enabled: bool, msc4210_enabled: bool,
/// If MSC4306 (thread subscriptions) is enabled.
msc4306_enabled: bool,
} }
#[pymethods] #[pymethods]
@@ -129,7 +126,6 @@ impl PushRuleEvaluator {
room_version_feature_flags, room_version_feature_flags,
msc3931_enabled, msc3931_enabled,
msc4210_enabled, msc4210_enabled,
msc4306_enabled,
))] ))]
pub fn py_new( pub fn py_new(
flattened_keys: BTreeMap<String, JsonValue>, flattened_keys: BTreeMap<String, JsonValue>,
@@ -142,7 +138,6 @@ impl PushRuleEvaluator {
room_version_feature_flags: Vec<String>, room_version_feature_flags: Vec<String>,
msc3931_enabled: bool, msc3931_enabled: bool,
msc4210_enabled: bool, msc4210_enabled: bool,
msc4306_enabled: bool,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") { let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(), Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
@@ -161,7 +156,6 @@ impl PushRuleEvaluator {
room_version_feature_flags, room_version_feature_flags,
msc3931_enabled, msc3931_enabled,
msc4210_enabled, msc4210_enabled,
msc4306_enabled,
}) })
} }
@@ -173,19 +167,12 @@ impl PushRuleEvaluator {
/// ///
/// Returns the set of actions, if any, that match (filtering out any /// Returns the set of actions, if any, that match (filtering out any
/// `dont_notify` and `coalesce` actions). /// `dont_notify` and `coalesce` actions).
/// #[pyo3(signature = (push_rules, user_id=None, display_name=None))]
/// msc4306_thread_subscription_state: (Only populated if MSC4306 is enabled)
/// The thread subscription state corresponding to the thread containing this event.
/// - `None` if the event is not in a thread, or if MSC4306 is disabled.
/// - `Some(true)` if the event is in a thread and the user has a subscription for that thread
/// - `Some(false)` if the event is in a thread and the user does NOT have a subscription for that thread
#[pyo3(signature = (push_rules, user_id=None, display_name=None, msc4306_thread_subscription_state=None))]
pub fn run( pub fn run(
&self, &self,
push_rules: &FilteredPushRules, push_rules: &FilteredPushRules,
user_id: Option<&str>, user_id: Option<&str>,
display_name: Option<&str>, display_name: Option<&str>,
msc4306_thread_subscription_state: Option<bool>,
) -> Vec<Action> { ) -> Vec<Action> {
'outer: for (push_rule, enabled) in push_rules.iter() { 'outer: for (push_rule, enabled) in push_rules.iter() {
if !enabled { if !enabled {
@@ -217,12 +204,7 @@ impl PushRuleEvaluator {
Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }), Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
); );
match self.match_condition( match self.match_condition(condition, user_id, display_name) {
condition,
user_id,
display_name,
msc4306_thread_subscription_state,
) {
Ok(true) => {} Ok(true) => {}
Ok(false) => continue 'outer, Ok(false) => continue 'outer,
Err(err) => { Err(err) => {
@@ -255,20 +237,14 @@ impl PushRuleEvaluator {
} }
/// Check if the given condition matches. /// Check if the given condition matches.
#[pyo3(signature = (condition, user_id=None, display_name=None, msc4306_thread_subscription_state=None))] #[pyo3(signature = (condition, user_id=None, display_name=None))]
fn matches( fn matches(
&self, &self,
condition: Condition, condition: Condition,
user_id: Option<&str>, user_id: Option<&str>,
display_name: Option<&str>, display_name: Option<&str>,
msc4306_thread_subscription_state: Option<bool>,
) -> bool { ) -> bool {
match self.match_condition( match self.match_condition(&condition, user_id, display_name) {
&condition,
user_id,
display_name,
msc4306_thread_subscription_state,
) {
Ok(true) => true, Ok(true) => true,
Ok(false) => false, Ok(false) => false,
Err(err) => { Err(err) => {
@@ -286,7 +262,6 @@ impl PushRuleEvaluator {
condition: &Condition, condition: &Condition,
user_id: Option<&str>, user_id: Option<&str>,
display_name: Option<&str>, display_name: Option<&str>,
msc4306_thread_subscription_state: Option<bool>,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let known_condition = match condition { let known_condition = match condition {
Condition::Known(known) => known, Condition::Known(known) => known,
@@ -418,13 +393,6 @@ impl PushRuleEvaluator {
&& self.room_version_feature_flags.contains(&flag) && self.room_version_feature_flags.contains(&flag)
} }
} }
KnownCondition::Msc4306ThreadSubscription { subscribed } => {
if !self.msc4306_enabled {
false
} else {
msc4306_thread_subscription_state == Some(*subscribed)
}
}
}; };
Ok(result) Ok(result)
@@ -568,11 +536,10 @@ fn push_rule_evaluator() {
vec![], vec![],
true, true,
false, false,
false,
) )
.unwrap(); .unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"), None); let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3); assert_eq!(result.len(), 3);
} }
@@ -599,7 +566,6 @@ fn test_requires_room_version_supports_condition() {
flags, flags,
true, true,
false, false,
false,
) )
.unwrap(); .unwrap();
@@ -609,7 +575,6 @@ fn test_requires_room_version_supports_condition() {
&FilteredPushRules::default(), &FilteredPushRules::default(),
Some("@bob:example.org"), Some("@bob:example.org"),
None, None,
None,
); );
assert_eq!(result.len(), 3); assert_eq!(result.len(), 3);
@@ -628,17 +593,7 @@ fn test_requires_room_version_supports_condition() {
}; };
let rules = PushRules::new(vec![custom_rule]); let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run( result = evaluator.run(
&FilteredPushRules::py_new( &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
rules,
BTreeMap::new(),
true,
false,
true,
false,
false,
false,
),
None,
None, None,
None, None,
); );
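
The `'outer` loop above boils down to first-match semantics: skip disabled rules, require every condition of a rule to hold, and return the first winner's actions. A runnable Python sketch of that control flow (not the real bindings):

from typing import Callable, List, Tuple

Condition = Callable[[], bool]

def run_push_rules(rules: List[Tuple[List[Condition], List[str], bool]]) -> List[str]:
    """First-match semantics: the first enabled rule whose conditions all
    match wins, and its actions are returned."""
    for conditions, actions, enabled in rules:
        if not enabled:
            continue  # mirrors the `if !enabled` check above
        if all(condition() for condition in conditions):
            return actions  # first fully-matching rule wins
    return []

# The first rule fails its condition, so the second one fires.
print(run_push_rules([
    ([lambda: False], ["notify"], True),
    ([lambda: True], ["notify", "sound"], True),
]))  # -> ['notify', 'sound']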

View File

@@ -369,10 +369,6 @@ pub enum KnownCondition {
RoomVersionSupports { RoomVersionSupports {
feature: Cow<'static, str>, feature: Cow<'static, str>,
}, },
#[serde(rename = "io.element.msc4306.thread_subscription")]
Msc4306ThreadSubscription {
subscribed: bool,
},
} }
impl<'source> IntoPyObject<'source> for Condition { impl<'source> IntoPyObject<'source> for Condition {
@@ -551,13 +547,11 @@ pub struct FilteredPushRules {
msc3664_enabled: bool, msc3664_enabled: bool,
msc4028_push_encrypted_events: bool, msc4028_push_encrypted_events: bool,
msc4210_enabled: bool, msc4210_enabled: bool,
msc4306_enabled: bool,
} }
#[pymethods] #[pymethods]
impl FilteredPushRules { impl FilteredPushRules {
#[new] #[new]
#[allow(clippy::too_many_arguments)]
pub fn py_new( pub fn py_new(
push_rules: PushRules, push_rules: PushRules,
enabled_map: BTreeMap<String, bool>, enabled_map: BTreeMap<String, bool>,
@@ -566,7 +560,6 @@ impl FilteredPushRules {
msc3664_enabled: bool, msc3664_enabled: bool,
msc4028_push_encrypted_events: bool, msc4028_push_encrypted_events: bool,
msc4210_enabled: bool, msc4210_enabled: bool,
msc4306_enabled: bool,
) -> Self { ) -> Self {
Self { Self {
push_rules, push_rules,
@@ -576,7 +569,6 @@ impl FilteredPushRules {
msc3664_enabled, msc3664_enabled,
msc4028_push_encrypted_events, msc4028_push_encrypted_events,
msc4210_enabled, msc4210_enabled,
msc4306_enabled,
} }
} }
@@ -627,10 +619,6 @@ impl FilteredPushRules {
return false; return false;
} }
if !self.msc4306_enabled && rule.rule_id.contains("/.io.element.msc4306.rule.") {
return false;
}
true true
}) })
.map(|r| { .map(|r| {
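
The rule-ID gating being removed above follows a simple pattern: experimental rules carry the MSC namespace in their rule ID and are filtered out unless the corresponding flag is on. A standalone sketch:

def filter_experimental_rules(rule_ids, msc4306_enabled):
    """Drop MSC4306 rules unless the feature flag is enabled; the MSC
    namespace is embedded in the rule ID itself."""
    return [
        rule_id
        for rule_id in rule_ids
        if msc4306_enabled or "/.io.element.msc4306.rule." not in rule_id
    ]

print(filter_experimental_rules(
    [
        "global/underride/.m.rule.call",
        "global/content/.io.element.msc4306.rule.subscribed_thread",
    ],
    msc4306_enabled=False,
))  # -> ['global/underride/.m.rule.call']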

View File

@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
$id: https://element-hq.github.io/synapse/schema/synapse/v1.137/synapse-config.schema.json $id: https://element-hq.github.io/synapse/schema/synapse/v1.135/synapse-config.schema.json
type: object type: object
properties: properties:
modules: modules:
@@ -656,43 +656,6 @@ properties:
- - master.hostname.example.com - - master.hostname.example.com
- 10.1.0.0/16 - 10.1.0.0/16
- 172.30.0.0/16 - 172.30.0.0/16
matrix_authentication_service:
type: object
description: >-
The `matrix_authentication_service` setting configures integration with
[Matrix Authentication Service (MAS)](https://github.com/element-hq/matrix-authentication-service).
properties:
enabled:
type: boolean
description: >-
Whether or not to enable the MAS integration. If this is set to
`false`, Synapse will use its legacy internal authentication API.
default: false
endpoint:
type: string
format: uri
description: >-
The URL where Synapse can reach MAS. This *must* have the `discovery`
and `oauth` resources mounted.
default: http://localhost:8080
secret:
type: ["string", "null"]
description: >-
A shared secret that will be used to authenticate requests from and to MAS.
secret_path:
type: ["string", "null"]
description: >-
Alternative to `secret`, reading the shared secret from a file.
The file should be a plain text file, containing only the secret.
Synapse reads the secret from the given file once at startup.
examples:
- enabled: true
secret: someverysecuresecret
endpoint: http://localhost:8080
dummy_events_threshold: dummy_events_threshold:
type: integer type: integer
description: >- description: >-
@@ -5184,7 +5147,7 @@ properties:
"m.room.avatar": 50 "m.room.avatar": 50
"m.room.tombstone": 100 (150 if MSC4289 is used) "m.room.tombstone": 100
"m.room.server_acl": 100 "m.room.server_acl": 100

View File

@@ -23,21 +23,16 @@
can crop up, e.g. the cache descriptors. can crop up, e.g. the cache descriptors.
""" """
import enum from typing import Callable, Optional, Tuple, Type, Union
from typing import Callable, Mapping, Optional, Tuple, Type, Union
import attr
import mypy.types import mypy.types
from mypy.erasetype import remove_instance_last_known_values from mypy.erasetype import remove_instance_last_known_values
from mypy.errorcodes import ErrorCode from mypy.errorcodes import ErrorCode
from mypy.nodes import ARG_NAMED_OPT, ListExpr, NameExpr, TempNode, TupleExpr, Var from mypy.nodes import ARG_NAMED_OPT, ListExpr, NameExpr, TempNode, Var
from mypy.plugin import ( from mypy.plugin import (
ClassDefContext,
Context,
FunctionLike, FunctionLike,
FunctionSigContext, FunctionSigContext,
MethodSigContext, MethodSigContext,
MypyFile,
Plugin, Plugin,
) )
from mypy.typeops import bind_self from mypy.typeops import bind_self
@@ -46,15 +41,12 @@ from mypy.types import (
CallableType, CallableType,
Instance, Instance,
NoneType, NoneType,
Options,
TupleType, TupleType,
TypeAliasType, TypeAliasType,
TypeVarType, TypeVarType,
UninhabitedType, UninhabitedType,
UnionType, UnionType,
) )
from mypy_zope import plugin as mypy_zope_plugin
from pydantic.mypy import plugin as mypy_pydantic_plugin
PROMETHEUS_METRIC_MISSING_SERVER_NAME_LABEL = ErrorCode( PROMETHEUS_METRIC_MISSING_SERVER_NAME_LABEL = ErrorCode(
"missing-server-name-label", "missing-server-name-label",
@@ -62,153 +54,17 @@ PROMETHEUS_METRIC_MISSING_SERVER_NAME_LABEL = ErrorCode(
category="per-homeserver-tenant-metrics", category="per-homeserver-tenant-metrics",
) )
PROMETHEUS_METRIC_MISSING_FROM_LIST_TO_CHECK = ErrorCode(
"metric-type-missing-from-list",
"Every Prometheus metric type must be included in the `prometheus_metric_fullname_to_label_arg_map`.",
category="per-homeserver-tenant-metrics",
)
class Sentinel(enum.Enum):
# defining a sentinel in this way allows mypy to correctly handle the
# type of a dictionary lookup and subsequent type narrowing.
UNSET_SENTINEL = object()
@attr.s(auto_attribs=True)
class ArgLocation:
keyword_name: str
"""
The keyword argument name for this argument
"""
position: int
"""
The 0-based positional index of this argument
"""
prometheus_metric_fullname_to_label_arg_map: Mapping[str, Optional[ArgLocation]] = {
# `Collector` subclasses:
"prometheus_client.metrics.MetricWrapperBase": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Counter": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Histogram": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Gauge": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Summary": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Info": ArgLocation("labelnames", 2),
"prometheus_client.metrics.Enum": ArgLocation("labelnames", 2),
"synapse.metrics.LaterGauge": ArgLocation("labelnames", 2),
"synapse.metrics.InFlightGauge": ArgLocation("labels", 2),
"synapse.metrics.GaugeBucketCollector": ArgLocation("labelnames", 2),
"prometheus_client.registry.Collector": None,
"prometheus_client.registry._EmptyCollector": None,
"prometheus_client.registry.CollectorRegistry": None,
"prometheus_client.process_collector.ProcessCollector": None,
"prometheus_client.platform_collector.PlatformCollector": None,
"prometheus_client.gc_collector.GCCollector": None,
"synapse.metrics._gc.GCCounts": None,
"synapse.metrics._gc.PyPyGCStats": None,
"synapse.metrics._reactor_metrics.ReactorLastSeenMetric": None,
"synapse.metrics.CPUMetrics": None,
"synapse.metrics.jemalloc.JemallocCollector": None,
"synapse.util.metrics.DynamicCollectorRegistry": None,
"synapse.metrics.background_process_metrics._Collector": None,
#
# `Metric` subclasses:
"prometheus_client.metrics_core.Metric": None,
"prometheus_client.metrics_core.UnknownMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.CounterMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.GaugeMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.SummaryMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.InfoMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.HistogramMetricFamily": ArgLocation("labels", 3),
"prometheus_client.metrics_core.GaugeHistogramMetricFamily": ArgLocation(
"labels", 4
),
"prometheus_client.metrics_core.StateSetMetricFamily": ArgLocation("labels", 3),
"synapse.metrics.GaugeHistogramMetricFamilyWithLabels": ArgLocation(
"labelnames", 4
),
}
"""
Map from the fullname of the Prometheus `Metric`/`Collector` classes to the keyword
argument name and positional index of the label names. This map is useful because
different metrics have different signatures for passing in label names and we just need
to know where to look.
This map should include any metrics that we collect with Prometheus, which corresponds
to anything that inherits from `prometheus_client.registry.Collector`
(`synapse.metrics._types.Collector`) or `prometheus_client.metrics_core.Metric`. The
exhaustiveness of this list is enforced by `analyze_prometheus_metric_classes`.
The entries with `None` always fail the lint because they don't have a `labelnames`
argument (therefore, no `SERVER_NAME_LABEL`), but we include them here so that people
can notice and manually allow them via a type ignore comment, since the source of
truth should be in the source code.
"""
# Unbound at this point because we don't know the mypy version yet.
# This is set in the `plugin(...)` function below.
MypyPydanticPluginClass: Type[Plugin]
MypyZopePluginClass: Type[Plugin]
class SynapsePlugin(Plugin): class SynapsePlugin(Plugin):
def __init__(self, options: Options):
super().__init__(options)
self.mypy_pydantic_plugin = MypyPydanticPluginClass(options)
self.mypy_zope_plugin = MypyZopePluginClass(options)
def set_modules(self, modules: dict[str, MypyFile]) -> None:
"""
This is called by mypy internals. We have to override this to ensure it's also
called for any other plugins that we're manually handling.
Here is how mypy describes it:
> [`self._modules`] can't be set in `__init__` because it is executed too soon
> in `build.py`. Therefore, `build.py` *must* set it later before graph processing
> starts by calling `set_modules()`.
"""
super().set_modules(modules)
self.mypy_pydantic_plugin.set_modules(modules)
self.mypy_zope_plugin.set_modules(modules)
def get_base_class_hook(
self, fullname: str
) -> Optional[Callable[[ClassDefContext], None]]:
def _get_base_class_hook(ctx: ClassDefContext) -> None:
# Run any `get_base_class_hook` checks from other plugins first.
#
# Unfortunately, because mypy only chooses the first plugin that returns a
# non-None value (known-limitation, c.f.
# https://github.com/python/mypy/issues/19524), we workaround this by
# putting our custom plugin first in the plugin order and then calling the
# other plugin's hook manually followed by our own checks.
if callback := self.mypy_pydantic_plugin.get_base_class_hook(fullname):
callback(ctx)
if callback := self.mypy_zope_plugin.get_base_class_hook(fullname):
callback(ctx)
# Now run our own checks
analyze_prometheus_metric_classes(ctx)
return _get_base_class_hook
def get_function_signature_hook( def get_function_signature_hook(
self, fullname: str self, fullname: str
) -> Optional[Callable[[FunctionSigContext], FunctionLike]]: ) -> Optional[Callable[[FunctionSigContext], FunctionLike]]:
# Strip off the unique identifier for classes that are dynamically created inside if fullname in (
# functions. ex. `synapse.metrics.jemalloc.JemallocCollector@185` (this is the line "prometheus_client.metrics.Counter",
# number) # TODO: Add other prometheus_client metrics that need checking as we
if "@" in fullname: # refactor, see https://github.com/element-hq/synapse/issues/18592
fullname = fullname.split("@", 1)[0] ):
return check_prometheus_metric_instantiation
# Look for any Prometheus metrics to make sure they have the `SERVER_NAME_LABEL`
# label.
if fullname in prometheus_metric_fullname_to_label_arg_map.keys():
# Because it's difficult to determine the `fullname` of the function in the
# callback, let's just pass it in while we have it.
return lambda ctx: check_prometheus_metric_instantiation(ctx, fullname)
return None return None
@@ -232,44 +88,7 @@ class SynapsePlugin(Plugin):
return None return None
def analyze_prometheus_metric_classes(ctx: ClassDefContext) -> None: def check_prometheus_metric_instantiation(ctx: FunctionSigContext) -> CallableType:
"""
Cross-check the list of Prometheus metric classes against the
`prometheus_metric_fullname_to_label_arg_map` to ensure the list is exhaustive and
up-to-date.
"""
fullname = ctx.cls.fullname
# Strip off the unique identifier for classes that are dynamically created inside
# functions. ex. `synapse.metrics.jemalloc.JemallocCollector@185` (this is the line
# number)
if "@" in fullname:
fullname = fullname.split("@", 1)[0]
if any(
ancestor_type.fullname
in (
# All of the Prometheus metric classes inherit from the `Collector`.
"prometheus_client.registry.Collector",
"synapse.metrics._types.Collector",
# And custom metrics that inherit from `Metric`.
"prometheus_client.metrics_core.Metric",
)
for ancestor_type in ctx.cls.info.mro
):
if fullname not in prometheus_metric_fullname_to_label_arg_map:
ctx.api.fail(
f"Expected {fullname} to be in `prometheus_metric_fullname_to_label_arg_map`, "
f"but it was not found. This is a problem with our custom mypy plugin. "
f"Please add it to the map.",
Context(),
code=PROMETHEUS_METRIC_MISSING_FROM_LIST_TO_CHECK,
)
def check_prometheus_metric_instantiation(
ctx: FunctionSigContext, fullname: str
) -> CallableType:
""" """
Ensure that the `prometheus_client` metrics include the `SERVER_NAME_LABEL` label Ensure that the `prometheus_client` metrics include the `SERVER_NAME_LABEL` label
when instantiated. when instantiated.
@@ -279,52 +98,21 @@ def check_prometheus_metric_instantiation(
ensures metrics are correctly separated by homeserver. ensures metrics are correctly separated by homeserver.
There are also some metrics that apply at the process level, such as CPU usage, There are also some metrics that apply at the process level, such as CPU usage,
Python garbage collection, and Twisted reactor tick time, which shouldn't have the Python garbage collection, and Twisted reactor tick time, which shouldn't have the
`SERVER_NAME_LABEL`. In those cases, use a type ignore comment to disable the `SERVER_NAME_LABEL`. In those cases, use a type ignore comment to disable the
check, e.g. `# type: ignore[missing-server-name-label]`. check, e.g. `# type: ignore[missing-server-name-label]`.
Args:
ctx: The `FunctionSigContext` from mypy.
fullname: The fully qualified name of the function being called,
e.g. `"prometheus_client.metrics.Counter"`
""" """
# The true signature, this isn't being modified so this is what will be returned. # The true signature, this isn't being modified so this is what will be returned.
signature = ctx.default_signature signature: CallableType = ctx.default_signature
# Find where the label names argument is in the function signature.
arg_location = prometheus_metric_fullname_to_label_arg_map.get(
fullname, Sentinel.UNSET_SENTINEL
)
assert arg_location is not Sentinel.UNSET_SENTINEL, (
f"Expected to find {fullname} in `prometheus_metric_fullname_to_label_arg_map`, "
f"but it was not found. This is a problem with our custom mypy plugin. "
f"Please add it to the map. Context: {ctx.context}"
)
# People should be using `# type: ignore[missing-server-name-label]` for
# process-level metrics that should not have the `SERVER_NAME_LABEL`.
if arg_location is None:
ctx.api.fail(
f"{signature.name} does not have a `labelnames`/`labels` argument "
"(if this is untrue, update `prometheus_metric_fullname_to_label_arg_map` "
"in our custom mypy plugin) and should probably have a type ignore comment, "
"e.g. `# type: ignore[missing-server-name-label]`. The reason we don't "
"automatically ignore this is the source of truth should be in the source code.",
ctx.context,
code=PROMETHEUS_METRIC_MISSING_SERVER_NAME_LABEL,
)
return signature
# Sanity check the arguments are still as expected in this version of # Sanity check the arguments are still as expected in this version of
# `prometheus_client`. ex. `Counter(name, documentation, labelnames, ...)` # `prometheus_client`. ex. `Counter(name, documentation, labelnames, ...)`
# #
# `signature.arg_names` should be: ["name", "documentation", "labelnames", ...] # `signature.arg_names` should be: ["name", "documentation", "labelnames", ...]
if ( if len(signature.arg_names) < 3 or signature.arg_names[2] != "labelnames":
len(signature.arg_names) < (arg_location.position + 1)
or signature.arg_names[arg_location.position] != arg_location.keyword_name
):
ctx.api.fail( ctx.api.fail(
f"Expected argument number {arg_location.position + 1} of {signature.name} to be `labelnames`/`labels`, " f"Expected the 3rd argument of {signature.name} to be 'labelnames', but got "
f"but got {signature.arg_names[arg_location.position]}", f"{signature.arg_names[2]}",
ctx.context, ctx.context,
) )
return signature return signature
@@ -347,12 +135,8 @@ def check_prometheus_metric_instantiation(
# ... # ...
# ] # ]
# ``` # ```
labelnames_arg_expression = ( labelnames_arg_expression = ctx.args[2][0] if len(ctx.args[2]) > 0 else None
ctx.args[arg_location.position][0] if isinstance(labelnames_arg_expression, ListExpr):
if len(ctx.args[arg_location.position]) > 0
else None
)
if isinstance(labelnames_arg_expression, (ListExpr, TupleExpr)):
# Check if the `labelnames` argument includes the `server_name` label (`SERVER_NAME_LABEL`). # Check if the `labelnames` argument includes the `server_name` label (`SERVER_NAME_LABEL`).
for labelname_expression in labelnames_arg_expression.items: for labelname_expression in labelnames_arg_expression.items:
if ( if (
@@ -690,13 +474,10 @@ def is_cacheable(
def plugin(version: str) -> Type[SynapsePlugin]: def plugin(version: str) -> Type[SynapsePlugin]:
global MypyPydanticPluginClass, MypyZopePluginClass
# This is the entry point of the plugin, and lets us deal with the fact # This is the entry point of the plugin, and lets us deal with the fact
# that the mypy plugin interface is *not* stable by looking at the version # that the mypy plugin interface is *not* stable by looking at the version
# string. # string.
# #
# However, since we pin the version of mypy Synapse uses in CI, we don't # However, since we pin the version of mypy Synapse uses in CI, we don't
# really care. # really care.
MypyPydanticPluginClass = mypy_pydantic_plugin(version)
MypyZopePluginClass = mypy_zope_plugin(version)
return SynapsePlugin return SynapsePlugin
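
What the plugin's check enforces, sketched with plain prometheus_client; the label constant is stood in for here (in Synapse it lives in synapse.metrics):

from prometheus_client import Counter

SERVER_NAME_LABEL = "server_name"  # stand-in for synapse.metrics.SERVER_NAME_LABEL

# Passes the lint: the `labelnames` argument includes the server-name label.
ok = Counter(
    "synapse_example_total",
    "An example per-homeserver metric",
    labelnames=["type", SERVER_NAME_LABEL],
)

# A process-level metric with no server-name label would instead carry
# `# type: ignore[missing-server-name-label]` to opt out of the check.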

View File

@@ -45,6 +45,16 @@ if py_version < (3, 9):
# Allow using the asyncio reactor via env var. # Allow using the asyncio reactor via env var.
if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")): if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")):
from incremental import Version
import twisted
# We need a bugfix that is included in Twisted 21.2.0:
# https://twistedmatrix.com/trac/ticket/9787
if twisted.version < Version("Twisted", 21, 2, 0):
print("Using asyncio reactor requires Twisted>=21.2.0")
sys.exit(1)
import asyncio import asyncio
from twisted.internet import asyncioreactor from twisted.internet import asyncioreactor
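
The removed guard relies on incremental's componentwise version comparison; a small sketch (requires the incremental package):

from incremental import Version

minimum = Version("Twisted", 21, 2, 0)
print(Version("Twisted", 20, 3, 0) < minimum)   # True: too old, would exit
print(Version("Twisted", 22, 10, 0) < minimum)  # False: acceptable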

View File

@@ -34,11 +34,9 @@ HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2
if TYPE_CHECKING or HAS_PYDANTIC_V2: if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1 import ( from pydantic.v1 import (
AnyHttpUrl,
BaseModel, BaseModel,
Extra, Extra,
Field, Field,
FilePath,
MissingError, MissingError,
PydanticValueError, PydanticValueError,
StrictBool, StrictBool,
@@ -57,11 +55,9 @@ if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1.typing import get_args from pydantic.v1.typing import get_args
else: else:
from pydantic import ( from pydantic import (
AnyHttpUrl,
BaseModel, BaseModel,
Extra, Extra,
Field, Field,
FilePath,
MissingError, MissingError,
PydanticValueError, PydanticValueError,
StrictBool, StrictBool,
@@ -81,7 +77,6 @@ else:
__all__ = ( __all__ = (
"HAS_PYDANTIC_V2", "HAS_PYDANTIC_V2",
"AnyHttpUrl",
"BaseModel", "BaseModel",
"constr", "constr",
"conbytes", "conbytes",
@@ -90,7 +85,6 @@ __all__ = (
"ErrorWrapper", "ErrorWrapper",
"Extra", "Extra",
"Field", "Field",
"FilePath",
"get_args", "get_args",
"MissingError", "MissingError",
"parse_obj_as", "parse_obj_as",

View File

@@ -30,7 +30,6 @@ from typing import Any, Callable, Dict, Optional
import requests import requests
import yaml import yaml
from typing_extensions import Never
_CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ _CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path' Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path'
@@ -41,10 +40,6 @@ _NO_SHARED_SECRET_OPTS_ERROR = """\
No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config. No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
""" """
_EMPTY_SHARED_SECRET_PATH_OPTS_ERROR = """\
The secret given via `registration_shared_secret_path` must not be empty.
"""
_DEFAULT_SERVER_URL = "http://localhost:8008" _DEFAULT_SERVER_URL = "http://localhost:8008"
@@ -175,12 +170,6 @@ def register_new_user(
) )
def bail(err_msg: str) -> Never:
"""Prints the given message to stderr and exits."""
print(err_msg, file=sys.stderr)
sys.exit(1)
def main() -> None: def main() -> None:
logging.captureWarnings(True) logging.captureWarnings(True)
@@ -273,20 +262,15 @@ def main() -> None:
assert config is not None assert config is not None
secret = config.get("registration_shared_secret") secret = config.get("registration_shared_secret")
if not isinstance(secret, (str, type(None))):
bail("registration_shared_secret is not a string.")
secret_file = config.get("registration_shared_secret_path") secret_file = config.get("registration_shared_secret_path")
if not isinstance(secret_file, (str, type(None))): if secret_file:
bail("registration_shared_secret_path is not a string.") if secret:
print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
if not secret and not secret_file: sys.exit(1)
bail(_NO_SHARED_SECRET_OPTS_ERROR)
elif secret and secret_file:
bail(_CONFLICTING_SHARED_SECRET_OPTS_ERROR)
elif not secret and secret_file:
secret = _read_file(secret_file, "registration_shared_secret_path").strip() secret = _read_file(secret_file, "registration_shared_secret_path").strip()
if not secret: if not secret:
bail(_EMPTY_SHARED_SECRET_PATH_OPTS_ERROR) print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)
if args.password_file: if args.password_file:
password = _read_file(args.password_file, "password-file").strip() password = _read_file(args.password_file, "password-file").strip()
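
The precedence rules being reverted here boil down to: exactly one of the two options must be set, and a file-based secret must be non-empty after stripping. A standalone sketch (names hypothetical):

import sys
from typing import Optional

def resolve_shared_secret(secret: Optional[str], secret_file: Optional[str]) -> str:
    """Enforce the shared-secret precedence rules described above."""
    if secret and secret_file:
        sys.exit("conflicting shared-secret options")
    if not secret and not secret_file:
        sys.exit("no shared secret configured")
    if secret_file:
        with open(secret_file) as f:
            secret = f.read().strip()
        if not secret:
            sys.exit("shared-secret file must not be empty")
    assert secret is not None
    return secret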

View File

@@ -29,21 +29,19 @@ import attr
from synapse.config._base import ( from synapse.config._base import (
Config, Config,
ConfigError,
RootConfig, RootConfig,
find_config_files, find_config_files,
read_config_files, read_config_files,
) )
from synapse.config.database import DatabaseConfig from synapse.config.database import DatabaseConfig
from synapse.config.server import ServerConfig
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.engines import create_engine from synapse.storage.engines import create_engine
class ReviewConfig(RootConfig): class ReviewConfig(RootConfig):
"A config class that just pulls out the server and database config" "A config class that just pulls out the database config"
config_classes = [ServerConfig, DatabaseConfig] config_classes = [DatabaseConfig]
@attr.s(auto_attribs=True) @attr.s(auto_attribs=True)
@@ -150,10 +148,6 @@ def main() -> None:
config_dict = read_config_files(config_files) config_dict = read_config_files(config_files)
config.parse_config_dict(config_dict, "", "") config.parse_config_dict(config_dict, "", "")
server_name = config.server.server_name
if not isinstance(server_name, str):
raise ConfigError("Must be a string", ("server_name",))
since_ms = time.time() * 1000 - Config.parse_duration(config_args.since) since_ms = time.time() * 1000 - Config.parse_duration(config_args.since)
exclude_users_with_email = config_args.exclude_emails exclude_users_with_email = config_args.exclude_emails
exclude_users_with_appservice = config_args.exclude_app_service exclude_users_with_appservice = config_args.exclude_app_service
@@ -165,12 +159,7 @@ def main() -> None:
engine = create_engine(database_config.config) engine = create_engine(database_config.config)
with make_conn( with make_conn(database_config, engine, "review_recent_signups") as db_conn:
db_config=database_config,
engine=engine,
default_txn_name="review_recent_signups",
server_name=server_name,
) as db_conn:
# This generates a type of Cursor, not LoggingTransaction. # This generates a type of Cursor, not LoggingTransaction.
user_infos = get_recent_users( user_infos = get_recent_users(
db_conn.cursor(), db_conn.cursor(),

View File

@@ -672,14 +672,8 @@ class Porter:
engine = create_engine(db_config.config) engine = create_engine(db_config.config)
hs = MockHomeserver(self.hs_config) hs = MockHomeserver(self.hs_config)
server_name = hs.hostname
with make_conn( with make_conn(db_config, engine, "portdb") as db_conn:
db_config=db_config,
engine=engine,
default_txn_name="portdb",
server_name=server_name,
) as db_conn:
engine.check_database( engine.check_database(
db_conn, allow_outdated_version=allow_outdated_version db_conn, allow_outdated_version=allow_outdated_version
) )

View File

@@ -20,13 +20,10 @@
# #
from typing import TYPE_CHECKING, Optional, Protocol, Tuple from typing import TYPE_CHECKING, Optional, Protocol, Tuple
from prometheus_client import Histogram
from twisted.web.server import Request from twisted.web.server import Request
from synapse.appservice import ApplicationService from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest from synapse.http.site import SynapseRequest
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import Requester from synapse.types import Requester
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -36,13 +33,6 @@ if TYPE_CHECKING:
GUEST_DEVICE_ID = "guest_device" GUEST_DEVICE_ID = "guest_device"
introspection_response_timer = Histogram(
"synapse_api_auth_delegated_introspection_response",
"Time taken to get a response for an introspection request",
labelnames=["code", SERVER_NAME_LABEL],
)
class Auth(Protocol): class Auth(Protocol):
"""The interface that an auth provider must implement.""" """The interface that an auth provider must implement."""

View File

@@ -296,4 +296,4 @@ class InternalAuth(BaseAuth):
Returns: Returns:
True if the user is an admin True if the user is an admin
""" """
return await self.store.is_server_admin(requester.user.to_string()) return await self.store.is_server_admin(requester.user)

View File

@@ -1,432 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
#
import logging
from typing import TYPE_CHECKING, Optional
from urllib.parse import urlencode
from synapse._pydantic_compat import (
BaseModel,
Extra,
StrictBool,
StrictInt,
StrictStr,
ValidationError,
)
from synapse.api.auth.base import BaseAuth
from synapse.api.errors import (
AuthError,
HttpResponseException,
InvalidClientTokenError,
SynapseError,
UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import (
active_span,
force_tracing,
inject_request_headers,
start_active_span,
)
from synapse.metrics import SERVER_NAME_LABEL
from synapse.synapse_rust.http_client import HttpClient
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
from . import introspection_response_timer
if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# Scope as defined by MSC2967
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
class ServerMetadata(BaseModel):
class Config:
extra = Extra.allow
issuer: StrictStr
account_management_uri: StrictStr
class IntrospectionResponse(BaseModel):
retrieved_at_ms: StrictInt
active: StrictBool
scope: Optional[StrictStr]
username: Optional[StrictStr]
sub: Optional[StrictStr]
device_id: Optional[StrictStr]
expires_in: Optional[StrictInt]
class Config:
extra = Extra.allow
def get_scope_set(self) -> set[str]:
if not self.scope:
return set()
return {token for token in self.scope.split(" ") if token}
def is_active(self, now_ms: int) -> bool:
if not self.active:
return False
# Compatibility tokens don't expire and don't have an 'expires_in' field
if self.expires_in is None:
return True
absolute_expiry_ms = self.expires_in * 1000 + self.retrieved_at_ms
return now_ms < absolute_expiry_ms
class MasDelegatedAuth(BaseAuth):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self._clock = hs.get_clock()
self._config = hs.config.mas
self._http_client = hs.get_proxied_http_client()
self._rust_http_client = HttpClient(
reactor=hs.get_reactor(),
user_agent=self._http_client.user_agent.decode("utf8"),
)
self._server_metadata = RetryOnExceptionCachedCall[ServerMetadata](
self._load_metadata
)
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
# # Token Introspection Cache
# This remembers what users/devices are represented by which access tokens,
# in order to reduce overall system load:
# - on Synapse (as requests are relatively expensive)
# - on the network
# - on MAS
#
# Since there is no invalidation mechanism currently,
# the entries expire after 2 minutes.
# This does mean tokens can be treated as valid by Synapse
# for longer than they really are.
#
# Ideally, tokens should logically be invalidated in the following circumstances:
# - If a session logout happens.
# In this case, MAS will delete the device within Synapse
# anyway and this is good enough as an invalidation.
# - If the client refreshes their token in MAS.
# In this case, the device still exists and it's not the end of the world for
# the old access token to continue working for a short time.
self._introspection_cache: ResponseCache[str] = ResponseCache(
clock=self._clock,
name="mas_token_introspection",
server_name=self.server_name,
timeout_ms=120_000,
# don't log because the keys are access tokens
enable_logging=False,
)
@property
def _metadata_url(self) -> str:
return f"{self._config.endpoint.rstrip('/')}/.well-known/openid-configuration"
@property
def _introspection_endpoint(self) -> str:
return f"{self._config.endpoint.rstrip('/')}/oauth2/introspect"
async def _load_metadata(self) -> ServerMetadata:
response = await self._http_client.get_json(self._metadata_url)
metadata = ServerMetadata(**response)
return metadata
async def issuer(self) -> str:
metadata = await self._server_metadata.get()
return metadata.issuer
async def account_management_url(self) -> str:
metadata = await self._server_metadata.get()
return metadata.account_management_uri
async def auth_metadata(self) -> JsonDict:
metadata = await self._server_metadata.get()
return metadata.dict()
def is_request_using_the_shared_secret(self, request: SynapseRequest) -> bool:
"""
Check if the request is using the shared secret.
Args:
request: The request to check.
Returns:
True if the request is using the shared secret, False otherwise.
"""
access_token = self.get_access_token_from_request(request)
shared_secret = self._config.secret()
if not shared_secret:
return False
return access_token == shared_secret
async def _introspect_token(
self, token: str, cache_context: ResponseCacheContext[str]
) -> IntrospectionResponse:
"""
Sends a token to the introspection endpoint and returns the introspection response
Parameters:
token: The token to introspect
Raises:
HttpResponseException: If the introspection endpoint returns a non-2xx response
ValueError: If the introspection endpoint returns an invalid JSON response
JSONDecodeError: If the introspection endpoint returns a non-JSON response
Exception: If the HTTP request fails
Returns:
The introspection response
"""
# By default, we shouldn't cache the result unless we know it's valid
cache_context.should_cache = False
raw_headers: dict[str, str] = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"Authorization": f"Bearer {self._config.secret()}",
# Tell MAS that we support reading the device ID as an explicit
# value, not encoded in the scope. This is supported by MAS 0.15+
"X-MAS-Supports-Device-Id": "1",
}
args = {"token": token, "token_type_hint": "access_token"}
body = urlencode(args, True)
# Do the actual request
logger.debug("Fetching token from MAS")
start_time = self._clock.time()
try:
with start_active_span("mas-introspect-token"):
inject_request_headers(raw_headers)
with PreserveLoggingContext():
resp_body = await self._rust_http_client.post(
url=self._introspection_endpoint,
response_limit=1 * 1024 * 1024,
headers=raw_headers,
request_body=body,
)
except HttpResponseException as e:
end_time = self._clock.time()
introspection_response_timer.labels(
code=e.code, **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
raise
except Exception:
end_time = self._clock.time()
introspection_response_timer.labels(
code="ERR", **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
raise
logger.debug("Fetched token from MAS")
end_time = self._clock.time()
introspection_response_timer.labels(
code=200, **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
raw_response = json_decoder.decode(resp_body.decode("utf-8"))
try:
response = IntrospectionResponse(
retrieved_at_ms=self._clock.time_msec(),
**raw_response,
)
except ValidationError as e:
raise ValueError(
"The introspection endpoint returned an invalid JSON response"
) from e
# We had a valid response, so we can cache it
cache_context.should_cache = True
return response
async def is_server_admin(self, requester: Requester) -> bool:
return "urn:synapse:admin:*" in requester.scope
async def get_user_by_req(
self,
request: SynapseRequest,
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
parent_span = active_span()
with start_active_span("get_user_by_req"):
access_token = self.get_access_token_from_request(request)
requester = await self.get_appservice_user(request, access_token)
if not requester:
requester = await self.get_user_by_access_token(
token=access_token,
allow_expired=allow_expired,
)
await self._record_request(request, requester)
request.requester = requester
if parent_span:
if requester.authenticated_entity in self._force_tracing_for_users:
# request tracing is enabled for this user, so we need to force it
# tracing on for the parent span (which will be the servlet span).
#
# It's too late for the get_user_by_req span to inherit the setting,
# so we also force it on for that.
force_tracing()
force_tracing(parent_span)
parent_span.set_tag(
"authenticated_entity", requester.authenticated_entity
)
parent_span.set_tag("user_id", requester.user.to_string())
if requester.device_id is not None:
parent_span.set_tag("device_id", requester.device_id)
if requester.app_service is not None:
parent_span.set_tag("appservice_id", requester.app_service.id)
return requester
async def get_user_by_access_token(
self,
token: str,
allow_expired: bool = False,
) -> Requester:
try:
introspection_result = await self._introspection_cache.wrap(
token, self._introspect_token, token, cache_context=True
)
except Exception:
logger.exception("Failed to introspect token")
raise SynapseError(503, "Unable to introspect the access token")
logger.debug("Introspection result: %r", introspection_result)
if not introspection_result.is_active(self._clock.time_msec()):
raise InvalidClientTokenError("Token is not active")
# Let's look at the scope
scope = introspection_result.get_scope_set()
# Determine type of user based on presence of particular scopes
if SCOPE_MATRIX_API not in scope:
raise InvalidClientTokenError(
"Token doesn't grant access to the Matrix C-S API"
)
if introspection_result.username is None:
raise AuthError(
500,
"Invalid username claim in the introspection result",
)
user_id = UserID(
localpart=introspection_result.username,
domain=self.server_name,
)
# Try to find a user from the username claim
user_info = await self.store.get_user_by_id(user_id=user_id.to_string())
if user_info is None:
raise AuthError(
500,
"User not found",
)
# MAS will give us the device ID as an explicit value for *compatibility* sessions.
# If present, we take it from here; if not, we read it from the scope for next-gen sessions.
device_id = introspection_result.device_id
if device_id is None:
# Find device_ids in scope
# We only allow a single device_id in the scope, so we find them all in the
# scope list, and raise if there are more than one. The OIDC server should be
# the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
device_ids = [
tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
for tok in scope
if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
]
if len(device_ids) > 1:
raise AuthError(
500,
"Multiple device IDs in scope",
)
device_id = device_ids[0] if device_ids else None
if device_id is not None:
# Sanity check the device_id
if len(device_id) > 255 or len(device_id) < 1:
raise AuthError(
500,
"Invalid device ID in introspection result",
)
# Make sure the device exists. This helps with introspection cache
# invalidation: if we log out, the device gets deleted by MAS
device = await self.store.get_device(
user_id=user_id.to_string(),
device_id=device_id,
)
if device is None:
# Invalidate the introspection cache, the device was deleted
self._introspection_cache.unset(token)
raise InvalidClientTokenError("Token is not active")
return create_requester(
user_id=user_id,
device_id=device_id,
scope=scope,
)
async def get_user_by_req_experimental_feature(
self,
request: SynapseRequest,
feature: "ExperimentalFeature",
allow_guest: bool = False,
allow_expired: bool = False,
allow_locked: bool = False,
) -> Requester:
try:
requester = await self.get_user_by_req(
request,
allow_guest=allow_guest,
allow_expired=allow_expired,
allow_locked=allow_locked,
)
if await self.store.is_feature_enabled(requester.user.to_string(), feature):
return requester
raise UnrecognizedRequestError(code=404)
except (AuthError, InvalidClientTokenError):
if feature.is_globally_enabled(self.hs.config):
# If its globally enabled then return the auth error
raise
raise UnrecognizedRequestError(code=404)
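
The device-scope handling in the removed get_user_by_access_token is self-contained enough to sketch standalone; the prefix constant matches MSC2967 as used above:

from typing import Optional, Set

SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"

def device_id_from_scope(scope: Set[str]) -> Optional[str]:
    """At most one device scope is allowed; its suffix is the device ID."""
    device_ids = [
        token[len(SCOPE_MATRIX_DEVICE_PREFIX):]
        for token in scope
        if token.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
    ]
    if len(device_ids) > 1:
        raise ValueError("Multiple device IDs in scope")
    return device_ids[0] if device_ids else None

print(device_id_from_scope({
    "urn:matrix:org.matrix.msc2967.client:api:*",
    "urn:matrix:org.matrix.msc2967.client:device:ABCDEFGH",
}))  # -> ABCDEFGH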

View File

@@ -28,6 +28,7 @@ from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret
from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
from authlib.oauth2.rfc7662 import IntrospectionToken from authlib.oauth2.rfc7662 import IntrospectionToken
from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
from prometheus_client import Histogram
from synapse.api.auth.base import BaseAuth from synapse.api.auth.base import BaseAuth
from synapse.api.errors import ( from synapse.api.errors import (
@@ -46,21 +47,25 @@ from synapse.logging.opentracing import (
inject_request_headers, inject_request_headers,
start_active_span, start_active_span,
) )
from synapse.metrics import SERVER_NAME_LABEL
from synapse.synapse_rust.http_client import HttpClient from synapse.synapse_rust.http_client import HttpClient
from synapse.types import Requester, UserID, create_requester from synapse.types import Requester, UserID, create_requester
from synapse.util import json_decoder from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
from . import introspection_response_timer
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.rest.admin.experimental_features import ExperimentalFeature from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.server import HomeServer from synapse.server import HomeServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
introspection_response_timer = Histogram(
"synapse_api_auth_delegated_introspection_response",
"Time taken to get a response for an introspection request",
["code"],
)
# Scope as defined by MSC2967 # Scope as defined by MSC2967
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*" SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
@@ -336,23 +341,17 @@ class MSC3861DelegatedAuth(BaseAuth):
) )
except HttpResponseException as e: except HttpResponseException as e:
end_time = self._clock.time() end_time = self._clock.time()
introspection_response_timer.labels( introspection_response_timer.labels(e.code).observe(end_time - start_time)
code=e.code, **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
raise raise
except Exception: except Exception:
end_time = self._clock.time() end_time = self._clock.time()
introspection_response_timer.labels( introspection_response_timer.labels("ERR").observe(end_time - start_time)
code="ERR", **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
raise raise
logger.debug("Fetched token from MAS") logger.debug("Fetched token from MAS")
end_time = self._clock.time() end_time = self._clock.time()
introspection_response_timer.labels( introspection_response_timer.labels(200).observe(end_time - start_time)
code=200, **{SERVER_NAME_LABEL: self.server_name}
).observe(end_time - start_time)
resp = json_decoder.decode(resp_body.decode("utf-8")) resp = json_decoder.decode(resp_body.decode("utf-8"))

View File

@@ -46,9 +46,6 @@ MAX_USERID_LENGTH = 255
# Constant value used for the pseudo-thread which is the main timeline. # Constant value used for the pseudo-thread which is the main timeline.
MAIN_TIMELINE: Final = "main" MAIN_TIMELINE: Final = "main"
# MAX_INT + 1, so it always trumps any PL in canonical JSON.
CREATOR_POWER_LEVEL = 2**53
class Membership: class Membership:
"""Represents the membership states of a user in a room.""" """Represents the membership states of a user in a room."""
@@ -238,8 +235,6 @@ class EventContentFields:
# #
# This is deprecated in MSC2175. # This is deprecated in MSC2175.
ROOM_CREATOR: Final = "creator" ROOM_CREATOR: Final = "creator"
# MSC4289
ADDITIONAL_CREATORS: Final = "additional_creators"
# The version of the room for `m.room.create` events. # The version of the room for `m.room.create` events.
ROOM_VERSION: Final = "room_version" ROOM_VERSION: Final = "room_version"
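
A quick arithmetic note on the CREATOR_POWER_LEVEL constant removed above, restating the comment's reasoning:

# Canonical JSON caps integers at 2**53 - 1 (the "MAX_INT" in the comment),
# so 2**53 sits strictly above any power level a valid event can carry.
CREATOR_POWER_LEVEL = 2**53
assert CREATOR_POWER_LEVEL == 9_007_199_254_740_992
assert CREATOR_POWER_LEVEL > 2**53 - 1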

View File

@@ -140,12 +140,6 @@ class Codes(str, Enum):
# Part of MSC4155 # Part of MSC4155
INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED" INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"
# Part of MSC4306: Thread Subscriptions
MSC4306_CONFLICTING_UNSUBSCRIPTION = (
"IO.ELEMENT.MSC4306.M_CONFLICTING_UNSUBSCRIPTION"
)
MSC4306_NOT_IN_THREAD = "IO.ELEMENT.MSC4306.M_NOT_IN_THREAD"
class CodeMessageException(RuntimeError): class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers. """An exception with integer code, a message string attributes and optional headers.

View File

@@ -36,14 +36,12 @@ class EventFormatVersions:
ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2 ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2
ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3 ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3
ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4 ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4
ROOM_V11_HYDRA_PLUS = 4 # MSC4291 room IDs as hashes: introduced for room HydraV11
KNOWN_EVENT_FORMAT_VERSIONS = { KNOWN_EVENT_FORMAT_VERSIONS = {
EventFormatVersions.ROOM_V1_V2, EventFormatVersions.ROOM_V1_V2,
EventFormatVersions.ROOM_V3, EventFormatVersions.ROOM_V3,
EventFormatVersions.ROOM_V4_PLUS, EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.ROOM_V11_HYDRA_PLUS,
} }
@@ -52,7 +50,6 @@ class StateResolutionVersions:
V1 = 1 # room v1 state res V1 = 1 # room v1 state res
V2 = 2 # MSC1442 state res: room v2 and later V2 = 2 # MSC1442 state res: room v2 and later
V2_1 = 3 # MSC4297 state res
class RoomDisposition: class RoomDisposition:
@@ -112,10 +109,6 @@ class RoomVersion:
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
# MSC3757: Restricting who can overwrite a state event # MSC3757: Restricting who can overwrite a state event
msc3757_enabled: bool msc3757_enabled: bool
# MSC4289: Creator power enabled
msc4289_creator_power_enabled: bool
# MSC4291: Room IDs as hashes of the create event
msc4291_room_ids_as_hashes: bool
class RoomVersions: class RoomVersions:
@@ -138,8 +131,6 @@ class RoomVersions:
enforce_int_power_levels=False, enforce_int_power_levels=False,
msc3931_push_features=(), msc3931_push_features=(),
msc3757_enabled=False, msc3757_enabled=False,
msc4289_creator_power_enabled=False,
msc4291_room_ids_as_hashes=False,
) )
V2 = RoomVersion( V2 = RoomVersion(
"2", "2",
@@ -160,8 +151,6 @@ class RoomVersions:
enforce_int_power_levels=False, enforce_int_power_levels=False,
msc3931_push_features=(), msc3931_push_features=(),
msc3757_enabled=False, msc3757_enabled=False,
msc4289_creator_power_enabled=False,
msc4291_room_ids_as_hashes=False,
) )
V3 = RoomVersion( V3 = RoomVersion(
"3", "3",
@@ -182,8 +171,6 @@ class RoomVersions:
enforce_int_power_levels=False, enforce_int_power_levels=False,
msc3931_push_features=(), msc3931_push_features=(),
msc3757_enabled=False, msc3757_enabled=False,
msc4289_creator_power_enabled=False,
msc4291_room_ids_as_hashes=False,
) )
V4 = RoomVersion( V4 = RoomVersion(
"4", "4",
@@ -204,8 +191,6 @@ class RoomVersions:
enforce_int_power_levels=False, enforce_int_power_levels=False,
msc3931_push_features=(), msc3931_push_features=(),
msc3757_enabled=False, msc3757_enabled=False,
msc4289_creator_power_enabled=False,
msc4291_room_ids_as_hashes=False,
) )
V5 = RoomVersion( V5 = RoomVersion(
"5", "5",
@@ -226,8 +211,6 @@ class RoomVersions:
enforce_int_power_levels=False, enforce_int_power_levels=False,
msc3931_push_features=(), msc3931_push_features=(),
msc3757_enabled=False, msc3757_enabled=False,
msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V6 = RoomVersion(
         "6",
@@ -248,8 +231,6 @@ class RoomVersions:
         enforce_int_power_levels=False,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V7 = RoomVersion(
         "7",
@@ -270,8 +251,6 @@ class RoomVersions:
         enforce_int_power_levels=False,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V8 = RoomVersion(
         "8",
@@ -292,8 +271,6 @@ class RoomVersions:
         enforce_int_power_levels=False,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V9 = RoomVersion(
         "9",
@@ -314,8 +291,6 @@ class RoomVersions:
         enforce_int_power_levels=False,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V10 = RoomVersion(
         "10",
@@ -336,8 +311,6 @@ class RoomVersions:
         enforce_int_power_levels=True,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     MSC1767v10 = RoomVersion(
         # MSC1767 (Extensible Events) based on room version "10"
@@ -359,8 +332,6 @@ class RoomVersions:
         enforce_int_power_levels=True,
         msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     MSC3757v10 = RoomVersion(
         # MSC3757 (Restricting who can overwrite a state event) based on room version "10"
@@ -382,8 +353,6 @@ class RoomVersions:
         enforce_int_power_levels=True,
         msc3931_push_features=(),
         msc3757_enabled=True,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     V11 = RoomVersion(
         "11",
@@ -404,8 +373,6 @@ class RoomVersions:
         enforce_int_power_levels=True,
         msc3931_push_features=(),
         msc3757_enabled=False,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
     MSC3757v11 = RoomVersion(
         # MSC3757 (Restricting who can overwrite a state event) based on room version "11"
@@ -427,52 +394,6 @@ class RoomVersions:
         enforce_int_power_levels=True,
         msc3931_push_features=(),
         msc3757_enabled=True,
-        msc4289_creator_power_enabled=False,
-        msc4291_room_ids_as_hashes=False,
     )
-    HydraV11 = RoomVersion(
-        "org.matrix.hydra.11",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.ROOM_V11_HYDRA_PLUS,
-        StateResolutionVersions.V2_1,  # Changed from v11
-        enforce_key_validity=True,
-        special_case_aliases_auth=False,
-        strict_canonicaljson=True,
-        limit_notifications_power_levels=True,
-        implicit_room_creator=True,  # Used by MSC3820
-        updated_redaction_rules=True,  # Used by MSC3820
-        restricted_join_rule=True,
-        restricted_join_rule_fix=True,
-        knock_join_rule=True,
-        msc3389_relation_redactions=False,
-        knock_restricted_join_rule=True,
-        enforce_int_power_levels=True,
-        msc3931_push_features=(),
-        msc3757_enabled=False,
-        msc4289_creator_power_enabled=True,  # Changed from v11
-        msc4291_room_ids_as_hashes=True,  # Changed from v11
-    )
-    V12 = RoomVersion(
-        "12",
-        RoomDisposition.STABLE,
-        EventFormatVersions.ROOM_V11_HYDRA_PLUS,
-        StateResolutionVersions.V2_1,  # Changed from v11
-        enforce_key_validity=True,
-        special_case_aliases_auth=False,
-        strict_canonicaljson=True,
-        limit_notifications_power_levels=True,
-        implicit_room_creator=True,  # Used by MSC3820
-        updated_redaction_rules=True,  # Used by MSC3820
-        restricted_join_rule=True,
-        restricted_join_rule_fix=True,
-        knock_join_rule=True,
-        msc3389_relation_redactions=False,
-        knock_restricted_join_rule=True,
-        enforce_int_power_levels=True,
-        msc3931_push_features=(),
-        msc3757_enabled=False,
-        msc4289_creator_power_enabled=True,  # Changed from v11
-        msc4291_room_ids_as_hashes=True,  # Changed from v11
-    )
@@ -490,10 +411,8 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V9,
         RoomVersions.V10,
         RoomVersions.V11,
-        RoomVersions.V12,
         RoomVersions.MSC3757v10,
         RoomVersions.MSC3757v11,
-        RoomVersions.HydraV11,
     )
 }

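As an aside on how a registry like this is typically consumed: a minimal sketch (not code from this diff) of resolving a room version string against a `KNOWN_ROOM_VERSIONS`-style mapping. The names mirror the ones above; the lookup-and-reject behaviour is an assumption based on the create-event rules shown later in this diff.

```python
from typing import Dict, Optional

# Hypothetical stand-in for the registry built above; the real mapping goes
# from version strings ("10", "11", "12", ...) to RoomVersion objects.
KNOWN_ROOM_VERSIONS: Dict[str, object] = {"10": object(), "11": object(), "12": object()}

def resolve_room_version(create_content: dict) -> Optional[object]:
    # Per the auth rules later in this diff, an absent room_version means "1".
    version_string = create_content.get("room_version", "1")
    # Unrecognised versions yield None so callers can reject the event.
    return KNOWN_ROOM_VERSIONS.get(version_string)
```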
View File

@@ -525,12 +525,8 @@ async def start(hs: "HomeServer") -> None:
     )
 
     # Register the threadpools with our metrics.
-    register_threadpool(
-        name="default", server_name=server_name, threadpool=reactor.getThreadPool()
-    )
-    register_threadpool(
-        name="gai_resolver", server_name=server_name, threadpool=resolver_threadpool
-    )
+    register_threadpool("default", reactor.getThreadPool())
+    register_threadpool("gai_resolver", resolver_threadpool)
 
     # Set up the SIGHUP machinery.
     if hasattr(signal, "SIGHUP"):
View File

@@ -28,7 +28,6 @@ from prometheus_client import Gauge
 
 from twisted.internet import defer
 
-from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics.background_process_metrics import (
     run_as_background_process,
 )
@@ -58,25 +57,16 @@ Phone home stats are sent every 3 hours
 _stats_process: List[Tuple[int, "resource.struct_rusage"]] = []
 
 # Gauges to expose monthly active user control metrics
-current_mau_gauge = Gauge(
-    "synapse_admin_mau_current",
-    "Current MAU",
-    labelnames=[SERVER_NAME_LABEL],
-)
+current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU")
 current_mau_by_service_gauge = Gauge(
     "synapse_admin_mau_current_mau_by_service",
     "Current MAU by service",
-    labelnames=["app_service", SERVER_NAME_LABEL],
+    ["app_service"],
 )
-max_mau_gauge = Gauge(
-    "synapse_admin_mau_max",
-    "MAU Limit",
-    labelnames=[SERVER_NAME_LABEL],
-)
+max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit")
 registered_reserved_users_mau_gauge = Gauge(
     "synapse_admin_mau_registered_reserved_users",
     "Registered users with reserved threepids",
-    labelnames=[SERVER_NAME_LABEL],
 )
@@ -247,21 +237,13 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
             await store.get_monthly_active_count_by_service()
         )
         reserved_users = await store.get_registered_reserved_users()
-        current_mau_gauge.labels(**{SERVER_NAME_LABEL: server_name}).set(
-            float(current_mau_count)
-        )
+        current_mau_gauge.set(float(current_mau_count))
         for app_service, count in current_mau_count_by_service.items():
-            current_mau_by_service_gauge.labels(
-                app_service=app_service, **{SERVER_NAME_LABEL: server_name}
-            ).set(float(count))
-        registered_reserved_users_mau_gauge.labels(
-            **{SERVER_NAME_LABEL: server_name}
-        ).set(float(len(reserved_users)))
-        max_mau_gauge.labels(**{SERVER_NAME_LABEL: server_name}).set(
-            float(hs.config.server.max_mau_value)
-        )
+            current_mau_by_service_gauge.labels(app_service).set(float(count))
+        registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
+        max_mau_gauge.set(float(hs.config.server.max_mau_value))
 
     return run_as_background_process(
         "generate_monthly_active_users",

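The hunk above moves between two `prometheus_client` patterns: per-server labelled gauges and plain process-wide gauges. A small illustration of both, using the real `prometheus_client` API but made-up metric names:

```python
from prometheus_client import Gauge

# Unlabelled: a single time series for the whole process.
plain_mau = Gauge("example_mau_current", "Current MAU")
plain_mau.set(42.0)

# Labelled: one time series per label combination, selected via .labels().
labelled_mau = Gauge(
    "example_mau_current_by_server",
    "Current MAU, partitioned by homeserver",
    labelnames=["server_name"],
)
labelled_mau.labels(server_name="example.com").set(42.0)
```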
View File

@@ -36,7 +36,6 @@ from synapse.config import (  # noqa: F401
     jwt,
     key,
     logger,
-    mas,
     metrics,
     modules,
     oembed,
@@ -125,7 +124,6 @@ class RootConfig:
     background_updates: background_updates.BackgroundUpdateConfig
     auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig
     user_types: user_types.UserTypesConfig
-    mas: mas.MasConfig
 
     config_classes: List[Type["Config"]] = ...
     config_files: List[str]

View File

@@ -36,14 +36,13 @@ class AuthConfig(Config):
         if password_config is None:
             password_config = {}
 
-        auth_delegated = (config.get("experimental_features") or {}).get(
-            "msc3861", {}
-        ).get("enabled", False) or (
-            config.get("matrix_authentication_service") or {}
-        ).get("enabled", False)
-
-        # The default value of password_config.enabled is True, unless auth is delegated
-        passwords_enabled = password_config.get("enabled", not auth_delegated)
+        # The default value of password_config.enabled is True, unless msc3861 is enabled.
+        msc3861_enabled = (
+            (config.get("experimental_features") or {})
+            .get("msc3861", {})
+            .get("enabled", False)
+        )
+        passwords_enabled = password_config.get("enabled", not msc3861_enabled)
 
         # 'only_for_reauth' allows users who have previously set a password to use it,
         # even though passwords would otherwise be disabled.

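Both sides of the hunk above derive a default from a nested, possibly-absent config section. A standalone sketch of the pattern with a hypothetical config dict (not Synapse's actual parser):

```python
config = {"experimental_features": {"msc3861": {"enabled": True}}}

# `or {}` guards against the section being present but explicitly null.
msc3861_enabled = (
    (config.get("experimental_features") or {})
    .get("msc3861", {})
    .get("enabled", False)
)

# Passwords default to enabled only when delegation is off.
password_config = config.get("password_config") or {}
passwords_enabled = password_config.get("enabled", not msc3861_enabled)
assert passwords_enabled is False
```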
View File

@@ -535,15 +535,11 @@ class ExperimentalConfig(Config):
             "msc4108_delegation_endpoint", None
         )
 
-        auth_delegated = self.msc3861.enabled or (
-            config.get("matrix_authentication_service") or {}
-        ).get("enabled", False)
-
         if (
             self.msc4108_enabled or self.msc4108_delegation_endpoint is not None
-        ) and not auth_delegated:
+        ) and not self.msc3861.enabled:
             raise ConfigError(
-                "MSC4108 requires MSC3861 or matrix_authentication_service to be enabled",
+                "MSC4108 requires MSC3861 to be enabled",
                 ("experimental", "msc4108_delegation_endpoint"),
             )

View File

@@ -36,7 +36,6 @@ from .federation import FederationConfig
 from .jwt import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig
-from .mas import MasConfig
 from .metrics import MetricsConfig
 from .modules import ModulesConfig
 from .oembed import OembedConfig
@@ -110,6 +109,4 @@ class HomeServerConfig(RootConfig):
         BackgroundUpdateConfig,
         AutoAcceptInvitesConfig,
         UserTypesConfig,
-        # This must be last, as it checks for conflicts with other config options.
-        MasConfig,
     ]

View File

@@ -1,192 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2025 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-#
-from typing import Any, Optional
-
-from synapse._pydantic_compat import (
-    AnyHttpUrl,
-    Field,
-    FilePath,
-    StrictBool,
-    StrictStr,
-    ValidationError,
-    validator,
-)
-from synapse.config.experimental import read_secret_from_file_once
-from synapse.types import JsonDict
-from synapse.util.pydantic_models import ParseModel
-
-from ._base import Config, ConfigError, RootConfig
-
-
-class MasConfigModel(ParseModel):
-    enabled: StrictBool = False
-    endpoint: AnyHttpUrl = Field(default="http://localhost:8080")
-    secret: Optional[StrictStr] = Field(default=None)
-    secret_path: Optional[FilePath] = Field(default=None)
-
-    @validator("secret")
-    def validate_secret_is_set_if_enabled(cls, v: Any, values: dict) -> Any:
-        if values.get("enabled", False) and not values.get("secret_path") and not v:
-            raise ValueError(
-                "You must set a `secret` or `secret_path` when enabling Matrix Authentication Service integration."
-            )
-        return v
-
-    @validator("secret_path")
-    def validate_secret_path_is_set_if_enabled(cls, v: Any, values: dict) -> Any:
-        if values.get("secret"):
-            raise ValueError(
-                "`secret` and `secret_path` cannot be set at the same time."
-            )
-        return v
-
-
-class MasConfig(Config):
-    section = "mas"
-
-    def read_config(
-        self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any
-    ) -> None:
-        mas_config = config.get("matrix_authentication_service", {})
-        if mas_config is None:
-            mas_config = {}
-
-        try:
-            parsed = MasConfigModel(**mas_config)
-        except ValidationError as e:
-            raise ConfigError(
-                "Could not validate Matrix Authentication Service configuration",
-                path=("matrix_authentication_service",),
-            ) from e
-
-        if parsed.secret and not allow_secrets_in_config:
-            raise ConfigError(
-                "Config options that expect an in-line secret as value are disabled",
-                ("matrix_authentication_service", "secret"),
-            )
-
-        self.enabled = parsed.enabled
-        self.endpoint = parsed.endpoint
-        self._secret = parsed.secret
-        self._secret_path = parsed.secret_path
-
-        self.check_config_conflicts(self.root)
-
-    def check_config_conflicts(
-        self,
-        root: RootConfig,
-    ) -> None:
-        """Checks for any configuration conflicts with other parts of Synapse.
-
-        Raises:
-            ConfigError: If there are any configuration conflicts.
-        """
-        if not self.enabled:
-            return
-
-        if root.experimental.msc3861.enabled:
-            raise ConfigError(
-                "Experimental MSC3861 was replaced by Matrix Authentication Service."
-                "Please disable MSC3861 or disable Matrix Authentication Service.",
-                ("experimental", "msc3861"),
-            )
-
-        if (
-            root.auth.password_enabled_for_reauth
-            or root.auth.password_enabled_for_login
-        ):
-            raise ConfigError(
-                "Password auth cannot be enabled when OAuth delegation is enabled",
-                ("password_config", "enabled"),
-            )
-
-        if root.registration.enable_registration:
-            raise ConfigError(
-                "Registration cannot be enabled when OAuth delegation is enabled",
-                ("enable_registration",),
-            )
-
-        # We only need to test the user consent version, as it must be set if the
-        # user_consent section was present in the config
-        if root.consent.user_consent_version is not None:
-            raise ConfigError(
-                "User consent cannot be enabled when OAuth delegation is enabled",
-                ("user_consent",),
-            )
-
-        if (
-            root.oidc.oidc_enabled
-            or root.saml2.saml2_enabled
-            or root.cas.cas_enabled
-            or root.jwt.jwt_enabled
-        ):
-            raise ConfigError("SSO cannot be enabled when OAuth delegation is enabled")
-
-        if bool(root.authproviders.password_providers):
-            raise ConfigError(
-                "Password auth providers cannot be enabled when OAuth delegation is enabled"
-            )
-
-        if root.captcha.enable_registration_captcha:
-            raise ConfigError(
-                "CAPTCHA cannot be enabled when OAuth delegation is enabled",
-                ("captcha", "enable_registration_captcha"),
-            )
-
-        if root.auth.login_via_existing_enabled:
-            raise ConfigError(
-                "Login via existing session cannot be enabled when OAuth delegation is enabled",
-                ("login_via_existing_session", "enabled"),
-            )
-
-        if root.registration.refresh_token_lifetime:
-            raise ConfigError(
-                "refresh_token_lifetime cannot be set when OAuth delegation is enabled",
-                ("refresh_token_lifetime",),
-            )
-
-        if root.registration.nonrefreshable_access_token_lifetime:
-            raise ConfigError(
-                "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled",
-                ("nonrefreshable_access_token_lifetime",),
-            )
-
-        if root.registration.session_lifetime:
-            raise ConfigError(
-                "session_lifetime cannot be set when OAuth delegation is enabled",
-                ("session_lifetime",),
-            )
-
-        if root.registration.enable_3pid_changes:
-            raise ConfigError(
-                "enable_3pid_changes cannot be enabled when OAuth delegation is enabled",
-                ("enable_3pid_changes",),
-            )
-
-    def secret(self) -> str:
-        if self._secret is not None:
-            return self._secret
-        elif self._secret_path is not None:
-            return read_secret_from_file_once(
-                str(self._secret_path),
-                ("matrix_authentication_service", "secret_path"),
-            )
-        else:
-            raise RuntimeError(
-                "Neither `secret` nor `secret_path` are set, this is a bug.",
-            )

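For reference, the removed module validated a `matrix_authentication_service` section with the two rules encoded in the Pydantic validators above. A plain-Python sketch of the same rules (the key names come from the removed model; the helper itself is illustrative):

```python
def validate_mas_section(section: dict) -> None:
    """Mirror the removed validators: `secret` and `secret_path` are mutually
    exclusive, and one of them is required once the integration is enabled."""
    secret = section.get("secret")
    secret_path = section.get("secret_path")
    if secret and secret_path:
        raise ValueError("`secret` and `secret_path` cannot be set at the same time.")
    if section.get("enabled", False) and not (secret or secret_path):
        raise ValueError(
            "You must set a `secret` or `secret_path` when enabling Matrix "
            "Authentication Service integration."
        )

validate_mas_section({"enabled": True, "secret": "hunter2"})  # passes
```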
View File

@@ -148,14 +148,15 @@ class RegistrationConfig(Config):
         self.enable_set_displayname = config.get("enable_set_displayname", True)
         self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
 
-        auth_delegated = (config.get("experimental_features") or {}).get(
-            "msc3861", {}
-        ).get("enabled", False) or (
-            config.get("matrix_authentication_service") or {}
-        ).get("enabled", False)
-
         # The default value of enable_3pid_changes is True, unless msc3861 is enabled.
-        self.enable_3pid_changes = config.get("enable_3pid_changes", not auth_delegated)
+        msc3861_enabled = (
+            (config.get("experimental_features") or {})
+            .get("msc3861", {})
+            .get("enabled", False)
+        )
+        self.enable_3pid_changes = config.get(
+            "enable_3pid_changes", not msc3861_enabled
+        )
 
         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False

View File

@@ -101,9 +101,6 @@ def compute_content_hash(
     event_dict.pop("outlier", None)
     event_dict.pop("destinations", None)
 
-    # N.B. no need to pop the room_id from create events in MSC4291 rooms
-    # as they shouldn't have one.
-
     event_json_bytes = encode_canonical_json(event_dict)
     hashed = hash_algorithm(event_json_bytes)

View File

@@ -45,7 +45,6 @@ from signedjson.sign import SignatureVerifyException, verify_signed_json
 from unpaddedbase64 import decode_base64
 
 from synapse.api.constants import (
-    CREATOR_POWER_LEVEL,
     MAX_PDU_SIZE,
     EventContentFields,
     EventTypes,
@@ -65,7 +64,6 @@ from synapse.api.room_versions import (
     RoomVersion,
     RoomVersions,
 )
-from synapse.events import is_creator
 from synapse.state import CREATE_KEY
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import (
@@ -263,8 +261,7 @@ async def check_state_independent_auth_rules(
                 f"Event {event.event_id} has unexpected auth_event for {k}: {auth_event_id}",
             )
 
-        # 2.3 ... If there are entries which were themselves rejected under the checks performed on receipt
-        # of a PDU, reject.
+        # We also need to check that the auth event itself is not rejected.
         if auth_event.rejected_reason:
             raise AuthError(
                 403,
@@ -274,7 +271,7 @@ async def check_state_independent_auth_rules(
 
         auth_dict[k] = auth_event_id
 
-    # 2.4. If event does not have a m.room.create in its auth_events, reject.
+    # 3. If event does not have a m.room.create in its auth_events, reject.
     creation_event = auth_dict.get((EventTypes.Create, ""), None)
     if not creation_event:
         raise AuthError(403, "No create event in auth events")
@@ -314,14 +311,13 @@ def check_state_dependent_auth_rules(
     # Later code relies on there being a create event e.g _can_federate, _is_membership_change_allowed
     # so produce a more intelligible error if we don't have one.
-    create_event = auth_dict.get(CREATE_KEY)
-    if create_event is None:
+    if auth_dict.get(CREATE_KEY) is None:
         raise AuthError(
             403, f"Event {event.event_id} is missing a create event in auth_events."
         )
 
     # additional check for m.federate
-    creating_domain = get_domain_from_id(create_event.sender)
+    creating_domain = get_domain_from_id(event.room_id)
     originating_domain = get_domain_from_id(event.sender)
     if creating_domain != originating_domain:
         if not _can_federate(event, auth_dict):
@@ -474,20 +470,12 @@ def _check_create(event: "EventBase") -> None:
     if event.prev_event_ids():
         raise AuthError(403, "Create event has prev events")
 
-    if event.room_version.msc4291_room_ids_as_hashes:
-        # 1.2 If the create event has a room_id, reject
-        if "room_id" in event:
-            raise AuthError(403, "Create event has a room_id")
-    else:
-        # 1.2 If the domain of the room_id does not match the domain of the sender,
-        # reject.
-        if not event.room_version.msc4291_room_ids_as_hashes:
-            sender_domain = get_domain_from_id(event.sender)
-            room_id_domain = get_domain_from_id(event.room_id)
-            if room_id_domain != sender_domain:
-                raise AuthError(
-                    403, "Creation event's room_id domain does not match sender's"
-                )
+    # 1.2 If the domain of the room_id does not match the domain of the sender,
+    # reject.
+    sender_domain = get_domain_from_id(event.sender)
+    room_id_domain = get_domain_from_id(event.room_id)
+    if room_id_domain != sender_domain:
+        raise AuthError(403, "Creation event's room_id domain does not match sender's")
 
     # 1.3 If content.room_version is present and is not a recognised version, reject
     room_version_prop = event.content.get("room_version", "1")
@@ -504,16 +492,6 @@ def _check_create(event: "EventBase") -> None:
     ):
         raise AuthError(403, "Create event lacks a 'creator' property")
 
-    # 1.5 If the additional_creators field is present and is not an array of strings where each
-    # string is a valid user ID, reject.
-    if (
-        event.room_version.msc4289_creator_power_enabled
-        and EventContentFields.ADDITIONAL_CREATORS in event.content
-    ):
-        check_valid_additional_creators(
-            event.content[EventContentFields.ADDITIONAL_CREATORS]
-        )
-
 
 def _can_federate(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool:
     creation_event = auth_events.get((EventTypes.Create, ""))
@@ -555,13 +533,7 @@ def _is_membership_change_allowed(
     target_user_id = event.state_key
 
-    # We need the create event in order to check if we can federate or not.
-    # If it's missing, yell loudly. Previously we only did this inside the
-    # _can_federate check.
-    create_event = auth_events.get((EventTypes.Create, ""))
-    if not create_event:
-        raise AuthError(403, "Create event missing from auth_events")
-    creating_domain = get_domain_from_id(create_event.sender)
+    creating_domain = get_domain_from_id(event.room_id)
     target_domain = get_domain_from_id(target_user_id)
     if creating_domain != target_domain:
         if not _can_federate(event, auth_events):
@@ -931,32 +903,6 @@ def _check_power_levels(
         except Exception:
             raise SynapseError(400, "Not a valid power level: %s" % (v,))
 
-    if room_version_obj.msc4289_creator_power_enabled:
-        # Enforce the creator does not appear in the users map
-        create_event = auth_events.get((EventTypes.Create, ""))
-        if not create_event:
-            raise SynapseError(
-                400, "Cannot check power levels without a create event in auth_events"
-            )
-        if create_event.sender in user_list:
-            raise SynapseError(
-                400,
-                "Creator user %s must not appear in content.users"
-                % (create_event.sender,),
-            )
-        additional_creators = create_event.content.get(
-            EventContentFields.ADDITIONAL_CREATORS, []
-        )
-        if additional_creators:
-            creators_in_user_list = set(additional_creators).intersection(
-                set(user_list)
-            )
-            if len(creators_in_user_list) > 0:
-                raise SynapseError(
-                    400,
-                    "Additional creators users must not appear in content.users",
-                )
-
     # Reject events with stringy power levels if required by room version
     if (
         event.type == EventTypes.PowerLevels
@@ -1082,9 +1028,6 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> int:
             "A create event in the auth events chain is required to calculate user power level correctly,"
             " but was not found. This indicates a bug"
         )
-    if create_event.room_version.msc4289_creator_power_enabled:
-        if is_creator(create_event, user_id):
-            return CREATOR_POWER_LEVEL
 
     power_level_event = get_power_level_event(auth_events)
     if power_level_event:
         level = power_level_event.content.get("users", {}).get(user_id)
@@ -1245,26 +1188,3 @@ def auth_types_for_event(
             auth_types.add(key)
 
     return auth_types
-
-
-def check_valid_additional_creators(additional_creators: Any) -> None:
-    """Check if the additional_creators provided is valid according to MSC4289.
-
-    The additional_creators can be supplied from an m.room.create event or from an /upgrade request.
-
-    Raises:
-        AuthError if the additional_creators is invalid for some reason.
-    """
-    if type(additional_creators) is not list:
-        raise AuthError(400, "additional_creators must be an array")
-    for entry in additional_creators:
-        if type(entry) is not str:
-            raise AuthError(400, "entry in additional_creators is not a string")
-        if not UserID.is_valid(entry):
-            raise AuthError(400, "entry in additional_creators is not a valid user ID")
-        # UserID.is_valid doesn't actually validate everything, so check the rest manually.
-        if len(entry) > 255 or len(entry.encode("utf-8")) > 255:
-            raise AuthError(
-                400,
-                "entry in additional_creators too long",
-            )

View File

@@ -41,13 +41,10 @@ from typing import (
 import attr
 from unpaddedbase64 import encode_base64
 
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
 from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
 from synapse.synapse_rust.events import EventInternalMetadata
-from synapse.types import (
-    JsonDict,
-    StrCollection,
-)
+from synapse.types import JsonDict, StrCollection
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -212,6 +209,7 @@ class EventBase(metaclass=abc.ABCMeta):
     content: DictProperty[JsonDict] = DictProperty("content")
     hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
     origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
+    room_id: DictProperty[str] = DictProperty("room_id")
     sender: DictProperty[str] = DictProperty("sender")
     # TODO state_key should be Optional[str]. This is generally asserted in Synapse
     # by calling is_state() first (which ensures it is not None), but it is hard (not possible?)
@@ -226,10 +224,6 @@ class EventBase(metaclass=abc.ABCMeta):
     def event_id(self) -> str:
         raise NotImplementedError()
 
-    @property
-    def room_id(self) -> str:
-        raise NotImplementedError()
-
     @property
     def membership(self) -> str:
         return self.content["membership"]
@@ -392,10 +386,6 @@ class FrozenEvent(EventBase):
     def event_id(self) -> str:
         return self._event_id
 
-    @property
-    def room_id(self) -> str:
-        return self._dict["room_id"]
-
 
 class FrozenEventV2(EventBase):
     format_version = EventFormatVersions.ROOM_V3  # All events of this type are V2
@@ -453,10 +443,6 @@ class FrozenEventV2(EventBase):
         self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
         return self._event_id
 
-    @property
-    def room_id(self) -> str:
-        return self._dict["room_id"]
-
     def prev_event_ids(self) -> List[str]:
         """Returns the list of prev event IDs. The order matches the order
         specified in the event, though there is no meaning to it.
@@ -495,67 +481,6 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
-
-class FrozenEventV4(FrozenEventV3):
-    """FrozenEventV4 for MSC4291 room IDs are hashes"""
-
-    format_version = EventFormatVersions.ROOM_V11_HYDRA_PLUS
-
-    """Override the room_id for m.room.create events"""
-
-    def __init__(
-        self,
-        event_dict: JsonDict,
-        room_version: RoomVersion,
-        internal_metadata_dict: Optional[JsonDict] = None,
-        rejected_reason: Optional[str] = None,
-    ):
-        super().__init__(
-            event_dict=event_dict,
-            room_version=room_version,
-            internal_metadata_dict=internal_metadata_dict,
-            rejected_reason=rejected_reason,
-        )
-        self._room_id: Optional[str] = None
-
-    @property
-    def room_id(self) -> str:
-        # if we have calculated the room ID already, don't do it again.
-        if self._room_id:
-            return self._room_id
-        is_create_event = self.type == EventTypes.Create and self.get_state_key() == ""
-        # for non-create events: use the supplied value from the JSON, as per FrozenEventV3
-        if not is_create_event:
-            self._room_id = self._dict["room_id"]
-            assert self._room_id is not None
-            return self._room_id
-        # for create events: calculate the room ID
-        from synapse.crypto.event_signing import compute_event_reference_hash
-
-        self._room_id = "!" + encode_base64(
-            compute_event_reference_hash(self)[1], urlsafe=True
-        )
-        return self._room_id
-
-    def auth_event_ids(self) -> StrCollection:
-        """Returns the list of auth event IDs. The order matches the order
-        specified in the event, though there is no meaning to it.
-
-        Returns:
-            The list of event IDs of this event's auth_events
-
-            Includes the creation event ID for convenience of all the codepaths
-            which expects the auth chain to include the creator ID, even though
-            it's explicitly not included on the wire. Excludes the create event
-            for the create event itself.
-        """
-        create_event_id = "$" + self.room_id[1:]
-        assert create_event_id not in self._dict["auth_events"]
-        if self.type == EventTypes.Create and self.get_state_key() == "":
-            return self._dict["auth_events"]  # should be []
-        return self._dict["auth_events"] + [create_event_id]
 
 def _event_type_from_format_version(
     format_version: int,
 ) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
@@ -575,8 +500,6 @@ def _event_type_from_format_version(
         return FrozenEventV2
     elif format_version == EventFormatVersions.ROOM_V4_PLUS:
         return FrozenEventV3
-    elif format_version == EventFormatVersions.ROOM_V11_HYDRA_PLUS:
-        return FrozenEventV4
     else:
         raise Exception("No event format %r" % (format_version,))
@@ -636,23 +559,6 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
     return _EventRelation(parent_id, rel_type, aggregation_key)
 
-
-def is_creator(create: EventBase, user_id: str) -> bool:
-    """
-    Return true if the provided user ID is the room creator.
-
-    This includes additional creators in MSC4289.
-    """
-    assert create.type == EventTypes.Create
-    if create.sender == user_id:
-        return True
-    if create.room_version.msc4289_creator_power_enabled:
-        additional_creators = set(
-            create.content.get(EventContentFields.ADDITIONAL_CREATORS, [])
-        )
-        return user_id in additional_creators
-    return False
-
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class StrippedStateEvent:
     """

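The removed `FrozenEventV4` encodes the core MSC4291 identity trick: the room ID is `!` plus the URL-safe unpadded base64 of the create event's reference hash, and the create event's ID is `$` plus the same encoding, so each can be derived from the other. A sketch of that arithmetic, assuming the reference hash has already been computed (the real code obtains it from `compute_event_reference_hash`):

```python
from typing import Tuple

from unpaddedbase64 import encode_base64

def ids_from_create_reference_hash(ref_hash: bytes) -> Tuple[str, str]:
    # Room ID: "!" + urlsafe unpadded base64 of the create event's reference hash.
    room_id = "!" + encode_base64(ref_hash, urlsafe=True)
    # The create event ID uses the same encoding with a "$" sigil, which is
    # why the removed code could write: create_event_id = "$" + room_id[1:]
    create_event_id = "$" + room_id[1:]
    return room_id, create_event_id
```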
View File

@@ -114,6 +114,7 @@ class InviteAutoAccepter:
             # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12)
             run_as_background_process(
                 "retry_make_join",
+                self.server_name,
                 self._retry_make_join,
                 event.state_key,
                 event.state_key,

View File

@@ -82,8 +82,7 @@ class EventBuilder:
     room_version: RoomVersion
 
-    # MSC4291 makes the room ID == the create event ID. This means the create event has no room_id.
-    room_id: Optional[str]
+    room_id: str
     type: str
     sender: str
@@ -143,14 +142,7 @@ class EventBuilder:
         Returns:
             The signed and hashed event.
         """
-        # Create events always have empty auth_events.
-        if self.type == EventTypes.Create and self.is_state() and self.state_key == "":
-            auth_event_ids = []
-
-        # Calculate auth_events for non-create events
         if auth_event_ids is None:
-            # Every non-create event must have a room ID
-            assert self.room_id is not None
             state_ids = await self._state.compute_state_after_events(
                 self.room_id,
                 prev_event_ids,
@@ -232,31 +224,12 @@ class EventBuilder:
             "auth_events": auth_events,
             "prev_events": prev_events,
             "type": self.type,
+            "room_id": self.room_id,
             "sender": self.sender,
             "content": self.content,
             "unsigned": self.unsigned,
             "depth": depth,
         }
-        if self.room_id is not None:
-            event_dict["room_id"] = self.room_id
-
-        if self.room_version.msc4291_room_ids_as_hashes:
-            # In MSC4291: the create event has no room ID as the create event ID /is/ the room ID.
-            if (
-                self.type == EventTypes.Create
-                and self.is_state()
-                and self._state_key == ""
-            ):
-                assert self.room_id is None
-            else:
-                # All other events do not reference the create event in auth_events, as the room ID
-                # /is/ the create event. However, the rest of the code (for consistency between room
-                # versions) assume that the create event remains part of the auth events. c.f. event
-                # class which automatically adds the create event when `.auth_event_ids()` is called
-                assert self.room_id is not None
-                create_event_id = "$" + self.room_id[1:]
-                auth_event_ids.remove(create_event_id)
-            event_dict["auth_events"] = auth_event_ids
 
         if self.is_state():
             event_dict["state_key"] = self._state_key
@@ -312,7 +285,7 @@ class EventBuilderFactory:
             room_version=room_version,
             type=key_values["type"],
             state_key=key_values.get("state_key"),
-            room_id=key_values.get("room_id"),
+            room_id=key_values["room_id"],
             sender=key_values["sender"],
             content=key_values.get("content", {}),
             unsigned=key_values.get("unsigned", {}),

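The removed builder logic above strips the create event out of `auth_events` before an MSC4291 event goes on the wire, and the (also removed) `FrozenEventV4.auth_event_ids` re-appends it for internal consumers. A hedged sketch of that round trip; the helper names are invented:

```python
from typing import List

def strip_create_for_wire(room_id: str, auth_event_ids: List[str]) -> List[str]:
    # On the wire, MSC4291 events omit the create event: it is implied,
    # because "$" + room_id[1:] *is* the create event ID.
    create_event_id = "$" + room_id[1:]
    return [e for e in auth_event_ids if e != create_event_id]

def restore_create_for_auth(room_id: str, wire_auth_events: List[str]) -> List[str]:
    # Internal code paths still expect the full auth chain, so the create
    # event ID is appended back when auth_event_ids() is consulted.
    return wire_auth_events + ["$" + room_id[1:]]
```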
View File

@@ -26,8 +26,8 @@ from typing import (
     Any,
     Awaitable,
     Callable,
-    Collection,
     Dict,
+    Iterable,
     List,
     Mapping,
     Match,
@@ -49,7 +49,6 @@ from synapse.api.constants import (
 )
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
-from synapse.logging.opentracing import SynapseTags, set_tag, trace
 from synapse.types import JsonDict, Requester
 
 from . import EventBase, StrippedStateEvent, make_event_from_dict
@@ -177,12 +176,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
         if room_version.updated_redaction_rules:
             # MSC2176 rules state that create events cannot have their `content` redacted.
             new_content = event_dict["content"]
-        if not room_version.implicit_room_creator:
+        elif not room_version.implicit_room_creator:
             # Some room versions give meaning to `creator`
             add_fields("creator")
-        if room_version.msc4291_room_ids_as_hashes:
-            # room_id is not allowed on the create event as it's derived from the event ID
-            allowed_keys.remove("room_id")
 
     elif event_type == EventTypes.JoinRules:
         add_fields("join_rule")
@@ -531,10 +527,6 @@ def serialize_event(
     if config.as_client_event:
         d = config.event_format(d)
 
-    # Ensure the room_id field is set for create events in MSC4291 rooms
-    if e.type == EventTypes.Create and e.room_version.msc4291_room_ids_as_hashes:
-        d["room_id"] = e.room_id
-
     # If the event is a redaction, the field with the redacted event ID appears
     # in a different location depending on the room version. e.redacts handles
     # fetching from the proper location; copy it to the other location for forwards-
@@ -546,11 +538,8 @@ def serialize_event(
         d["content"] = dict(d["content"])
         d["content"]["redacts"] = e.redacts
 
-    if config.include_admin_metadata:
-        if e.internal_metadata.is_soft_failed():
-            d["unsigned"]["io.element.synapse.soft_failed"] = True
-        if e.internal_metadata.policy_server_spammy:
-            d["unsigned"]["io.element.synapse.policy_server_spammy"] = True
+    if config.include_admin_metadata and e.internal_metadata.is_soft_failed():
+        d["unsigned"]["io.element.synapse.soft_failed"] = True
 
     only_event_fields = config.only_event_fields
     if only_event_fields:
@@ -711,10 +700,9 @@ class EventClientSerializer:
                 "m.relations", {}
             ).update(serialized_aggregations)
 
-    @trace
     async def serialize_events(
         self,
-        events: Collection[Union[JsonDict, EventBase]],
+        events: Iterable[Union[JsonDict, EventBase]],
         time_now: int,
         *,
         config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
@@ -733,11 +721,6 @@ class EventClientSerializer:
         Returns:
             The list of serialized events
         """
-        set_tag(
-            SynapseTags.FUNC_ARG_PREFIX + "events.length",
-            str(len(events)),
-        )
-
         return [
             await self.serialize_event(
                 event,
@@ -886,14 +869,6 @@ def strip_event(event: EventBase) -> JsonDict:
     Stripped state events can only have the `sender`, `type`, `state_key` and `content`
     properties present.
     """
-    # MSC4311: Ensure the create event is available on invites and knocks.
-    # TODO: Implement the rest of MSC4311
-    if (
-        event.room_version.msc4291_room_ids_as_hashes
-        and event.type == EventTypes.Create
-        and event.get_state_key() == ""
-    ):
-        return event.get_pdu_json()
     return {
         "type": event.type,
View File

@@ -183,18 +183,8 @@ class EventValidator:
             fields an event would have
         """
-        create_event_as_room_id = (
-            event.room_version.msc4291_room_ids_as_hashes
-            and event.type == EventTypes.Create
-            and hasattr(event, "state_key")
-            and event.state_key == ""
-        )
-
         strings = ["room_id", "sender", "type"]
-        if create_event_as_room_id:
-            strings.remove("room_id")
 
         if hasattr(event, "state_key"):
             strings.append("state_key")
@@ -202,14 +192,7 @@ class EventValidator:
             if not isinstance(getattr(event, s), str):
                 raise SynapseError(400, "Not '%s' a string type" % (s,))
 
-        if not create_event_as_room_id:
-            assert event.room_id is not None
-            RoomID.from_string(event.room_id)
-            if event.room_version.msc4291_room_ids_as_hashes and not RoomID.is_valid(
-                event.room_id
-            ):
-                raise SynapseError(400, f"Invalid room ID '{event.room_id}'")
-
+        RoomID.from_string(event.room_id)
         UserID.from_string(event.sender)
 
         if event.type == EventTypes.Message:

View File

@@ -174,7 +174,6 @@ class FederationBase:
                 "Event not allowed by policy server, soft-failing %s", pdu.event_id
             )
             pdu.internal_metadata.soft_failed = True
-            pdu.internal_metadata.policy_server_spammy = True
             # Note: we don't redact the event so admins can inspect the event after the
             # fact. Other processes may redact the event, but that won't be applied to
             # the database copy of the event until the server's config requires it.
@@ -343,21 +342,6 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
     if room_version.strict_canonicaljson:
         validate_canonicaljson(pdu_json)
 
-    # enforce that MSC4291 auth events don't include the create event.
-    # N.B. if they DO include a spurious create event, it'll fail auth checks elsewhere, so we don't
-    # need to do expensive DB lookups to find which event ID is the create event here.
-    if room_version.msc4291_room_ids_as_hashes:
-        room_id = pdu_json.get("room_id")
-        if room_id:
-            create_event_id = "$" + room_id[1:]
-            auth_events = pdu_json.get("auth_events")
-            if auth_events:
-                if create_event_id in auth_events:
-                    raise SynapseError(
-                        400,
-                        "auth_events must not contain the create event",
-                        Codes.BAD_JSON,
-                    )
-
     event = make_event_from_dict(pdu_json, room_version)
     return event

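A compact illustration of why the removed check in `event_from_pdu_json` needs no database lookup: in MSC4291 rooms the create event ID is pure string manipulation on the room ID, so a spurious `auth_events` entry can be spotted directly (sketch with a hypothetical helper and toy IDs):

```python
def pdu_redundantly_lists_create_event(pdu_json: dict) -> bool:
    room_id = pdu_json.get("room_id")
    if not room_id:
        return False
    # "$" + room_id[1:] is the create event ID in MSC4291 rooms.
    return ("$" + room_id[1:]) in (pdu_json.get("auth_events") or [])

assert pdu_redundantly_lists_create_event(
    {"room_id": "!abc123", "auth_events": ["$abc123"]}
)
```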
View File

@@ -122,13 +122,12 @@ received_queries_counter = Counter(
 pdu_process_time = Histogram(
     "synapse_federation_server_pdu_process_time",
     "Time taken to process an event",
-    labelnames=[SERVER_NAME_LABEL],
 )
 
 last_pdu_ts_metric = Gauge(
     "synapse_federation_last_received_pdu_time",
     "The timestamp of the last PDU which was successfully received from the given domain",
-    labelnames=("origin_server_name", SERVER_NAME_LABEL),
+    labelnames=("server_name",),
 )
@@ -555,9 +554,7 @@ class FederationServer(FederationBase):
         )
 
         if newest_pdu_ts and origin in self._federation_metrics_domains:
-            last_pdu_ts_metric.labels(
-                origin_server_name=origin, **{SERVER_NAME_LABEL: self.server_name}
-            ).set(newest_pdu_ts / 1000)
+            last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)
 
         return pdu_results
@@ -1325,9 +1322,9 @@ class FederationServer(FederationBase):
                     origin, event.event_id
                 )
                 if received_ts is not None:
-                    pdu_process_time.labels(
-                        **{SERVER_NAME_LABEL: self.server_name}
-                    ).observe((self._clock.time_msec() - received_ts) / 1000)
+                    pdu_process_time.observe(
+                        (self._clock.time_msec() - received_ts) / 1000
+                    )
 
             next = await self._get_next_nonspam_staged_event_for_room(
                 room_id, room_version

View File

@@ -54,7 +54,7 @@ from sortedcontainers import SortedDict
 from synapse.api.presence import UserPresenceState
 from synapse.federation.sender import AbstractFederationSender, FederationSender
-from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
+from synapse.metrics import LaterGauge
 from synapse.replication.tcp.streams.federation import FederationStream
 from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
 from synapse.util.metrics import Measure
@@ -113,10 +113,10 @@ class FederationRemoteSendQueue(AbstractFederationSender):
         #   changes. ARGH.
         def register(name: str, queue: Sized) -> None:
             LaterGauge(
-                name="synapse_federation_send_queue_%s_size" % (queue_name,),
-                desc="",
-                labelnames=[SERVER_NAME_LABEL],
-                caller=lambda: {(self.server_name,): len(queue)},
+                "synapse_federation_send_queue_%s_size" % (queue_name,),
+                "",
+                [],
+                lambda: len(queue),
             )
 
         for queue_name in [
View File

@@ -399,37 +399,31 @@ class FederationSender(AbstractFederationSender):
         self._per_destination_queues: Dict[str, PerDestinationQueue] = {}
 
         LaterGauge(
-            name="synapse_federation_transaction_queue_pending_destinations",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
-                (self.server_name,): sum(
-                    1
-                    for d in self._per_destination_queues.values()
-                    if d.transmission_loop_running
-                )
-            },
+            "synapse_federation_transaction_queue_pending_destinations",
+            "",
+            [],
+            lambda: sum(
+                1
+                for d in self._per_destination_queues.values()
+                if d.transmission_loop_running
+            ),
         )
 
         LaterGauge(
-            name="synapse_federation_transaction_queue_pending_pdus",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
-                (self.server_name,): sum(
-                    d.pending_pdu_count() for d in self._per_destination_queues.values()
-                )
-            },
+            "synapse_federation_transaction_queue_pending_pdus",
+            "",
+            [],
+            lambda: sum(
+                d.pending_pdu_count() for d in self._per_destination_queues.values()
+            ),
         )
 
         LaterGauge(
-            name="synapse_federation_transaction_queue_pending_edus",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {
-                (self.server_name,): sum(
-                    d.pending_edu_count() for d in self._per_destination_queues.values()
-                )
-            },
+            "synapse_federation_transaction_queue_pending_edus",
+            "",
+            [],
+            lambda: sum(
+                d.pending_edu_count() for d in self._per_destination_queues.values()
+            ),
         )
 
         self._is_processing = False
@@ -667,8 +661,7 @@ class FederationSender(AbstractFederationSender):
                 ts = event_to_received_ts[event.event_id]
                 assert ts is not None
                 synapse.metrics.event_processing_lag_by_event.labels(
-                    name="federation_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "federation_sender"
                 ).observe((now - ts) / 1000)
 
             async def handle_room_events(events: List[EventBase]) -> None:
@@ -712,12 +705,10 @@ class FederationSender(AbstractFederationSender):
                 assert ts is not None
 
                 synapse.metrics.event_processing_lag.labels(
-                    name="federation_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "federation_sender"
                 ).set(now - ts)
                 synapse.metrics.event_processing_last_ts.labels(
-                    name="federation_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "federation_sender"
                 ).set(ts)
 
                 events_processed_counter.labels(
@@ -735,7 +726,7 @@ class FederationSender(AbstractFederationSender):
                 ).inc()
 
                 synapse.metrics.event_processing_positions.labels(
-                    name="federation_sender", **{SERVER_NAME_LABEL: self.server_name}
+                    "federation_sender"
                 ).set(next_token)
 
             finally:
View File

@@ -34,7 +34,6 @@ from synapse.logging.opentracing import (
     tags,
     whitelisted_homeserver,
 )
-from synapse.metrics import SERVER_NAME_LABEL
 from synapse.types import JsonDict
 from synapse.util import json_decoder
 from synapse.util.metrics import measure_func
@@ -48,7 +47,7 @@ issue_8631_logger = logging.getLogger("synapse.8631_debug")
 last_pdu_ts_metric = Gauge(
     "synapse_federation_last_sent_pdu_time",
     "The timestamp of the last PDU which was successfully sent to the given domain",
-    labelnames=("destination_server_name", SERVER_NAME_LABEL),
+    labelnames=("server_name",),
 )
@@ -192,7 +191,6 @@ class TransactionManager:
         if pdus and destination in self._federation_metrics_domains:
             last_pdu = pdus[-1]
-            last_pdu_ts_metric.labels(
-                destination_server_name=destination,
-                **{SERVER_NAME_LABEL: self.server_name},
-            ).set(last_pdu.origin_server_ts / 1000)
+            last_pdu_ts_metric.labels(server_name=destination).set(
+                last_pdu.origin_server_ts / 1000
+            )

View File

@@ -187,8 +187,7 @@ class ApplicationServicesHandler:
                 assert ts is not None
                 synapse.metrics.event_processing_lag_by_event.labels(
-                    name="appservice_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "appservice_sender"
                 ).observe((now - ts) / 1000)
 
             async def handle_room_events(events: Iterable[EventBase]) -> None:
@@ -208,8 +207,7 @@ class ApplicationServicesHandler:
                 await self.store.set_appservice_last_pos(upper_bound)
 
                 synapse.metrics.event_processing_positions.labels(
-                    name="appservice_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "appservice_sender"
                 ).set(upper_bound)
 
                 events_processed_counter.labels(
@@ -232,12 +230,10 @@ class ApplicationServicesHandler:
                 assert ts is not None
                 synapse.metrics.event_processing_lag.labels(
-                    name="appservice_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "appservice_sender"
                 ).set(now - ts)
                 synapse.metrics.event_processing_last_ts.labels(
-                    name="appservice_sender",
-                    **{SERVER_NAME_LABEL: self.server_name},
+                    "appservice_sender"
                 ).set(ts)
             finally:
                 self.is_processing = False

View File

@@ -222,7 +222,6 @@ class AuthHandler:
         self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
         self._account_validity_handler = hs.get_account_validity_handler()
-        self._pusher_pool = hs.get_pusherpool()
 
         # Ratelimiter for failed auth during UIA. Uses same ratelimit config
         # as per `rc_login.failed_attempts`.
@@ -282,9 +281,7 @@ class AuthHandler:
         # response.
         self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {}
 
-        self._auth_delegation_enabled = (
-            hs.config.mas.enabled or hs.config.experimental.msc3861.enabled
-        )
+        self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
 
     async def validate_user_via_ui_auth(
         self,
@@ -335,7 +332,7 @@ class AuthHandler:
             LimitExceededError if the ratelimiter's failed request count for this
             user is too high to proceed
         """
-        if self._auth_delegation_enabled:
+        if self.msc3861_oauth_delegation_enabled:
             raise SynapseError(
                 HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861"
             )
@@ -1665,7 +1662,7 @@ class AuthHandler:
             )
 
         if medium == "email":
-            await self._pusher_pool.remove_pusher(
+            await self.store.delete_pusher_by_app_id_pushkey_user_id(
                 app_id="m.email", pushkey=address, user_id=user_id
             )

View File

@@ -25,9 +25,6 @@ from typing import TYPE_CHECKING, Optional
from synapse.api.constants import Membership from synapse.api.constants import Membership
from synapse.api.errors import SynapseError from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.deactivate_account import (
ReplicationNotifyAccountDeactivatedServlet,
)
from synapse.types import Codes, Requester, UserID, create_requester from synapse.types import Codes, Requester, UserID, create_requester
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -48,7 +45,6 @@ class DeactivateAccountHandler:
self._room_member_handler = hs.get_room_member_handler() self._room_member_handler = hs.get_room_member_handler()
         self._identity_handler = hs.get_identity_handler()
         self._profile_handler = hs.get_profile_handler()
-        self._pusher_pool = hs.get_pusherpool()
         self.user_directory_handler = hs.get_user_directory_handler()
         self._server_name = hs.hostname
         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
@@ -57,16 +53,10 @@ class DeactivateAccountHandler:
         self._user_parter_running = False
         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules

-        self._notify_account_deactivated_client = None
         # Start the user parter loop so it can resume parting users from rooms where
         # it left off (if it has work left to do).
-        if hs.config.worker.worker_app is None:
+        if hs.config.worker.run_background_tasks:
             hs.get_reactor().callWhenRunning(self._start_user_parting)
-        else:
-            self._notify_account_deactivated_client = (
-                ReplicationNotifyAccountDeactivatedServlet.make_client(hs)
-            )

         self._account_validity_enabled = (
             hs.config.account_validity.account_validity_enabled
@@ -156,7 +146,7 @@ class DeactivateAccountHandler:
         # Most of the pushers will have been deleted when we logged out the
         # associated devices above, but we still need to delete pushers not
         # associated with devices, e.g. email pushers.
-        await self._pusher_pool.delete_all_pushers_for_user(user_id)
+        await self.store.delete_all_pushers_for_user(user_id)

         # Add the user to a table of users pending deactivation (ie.
         # removal from all the rooms they're a member of)
@@ -180,6 +170,10 @@ class DeactivateAccountHandler:
         logger.info("Marking %s as erased", user_id)
         await self.store.mark_user_erased(user_id)

+        # Now start the process that goes through that list and
+        # parts users from rooms (if it isn't already running)
+        self._start_user_parting()
+
         # Reject all pending invites and knocks for the user, so that the
         # user doesn't show up in the "invited" section of rooms' members list.
         await self._reject_pending_invites_and_knocks_for_user(user_id)
@@ -200,37 +194,15 @@ class DeactivateAccountHandler:
         # Delete any server-side backup keys
         await self.store.bulk_delete_backup_keys_and_versions_for_user(user_id)

-        # Notify modules and start the room parting process.
-        await self.notify_account_deactivated(user_id, by_admin=by_admin)
-
-        return identity_server_supports_unbinding
-
-    async def notify_account_deactivated(
-        self,
-        user_id: str,
-        by_admin: bool = False,
-    ) -> None:
-        """Notify modules and start the room parting process.
-
-        Goes through replication if this is not the main process.
-        """
-        if self._notify_account_deactivated_client is not None:
-            await self._notify_account_deactivated_client(
-                user_id=user_id,
-                by_admin=by_admin,
-            )
-            return
-
-        # Now start the process that goes through that list and
-        # parts users from rooms (if it isn't already running)
-        self._start_user_parting()
-
         # Let modules know the user has been deactivated.
         await self._third_party_rules.on_user_deactivation_status_changed(
             user_id,
             True,
-            by_admin=by_admin,
+            by_admin,
         )

+        return identity_server_supports_unbinding
+
     async def _reject_pending_invites_and_knocks_for_user(self, user_id: str) -> None:
         """Reject pending invites and knocks addressed to a given user ID.

View File

@@ -22,7 +22,7 @@ from synapse.api.errors import ShadowBanError
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
 from synapse.logging.opentracing import set_tag
-from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
+from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.delayed_events import (
     ReplicationAddedDelayedEventRestServlet,
@@ -191,9 +191,7 @@ class DelayedEventsHandler:
         self._event_pos = max_pos

         # Expose current event processing position to prometheus
-        event_processing_positions.labels(
-            name="delayed_events", **{SERVER_NAME_LABEL: self.server_name}
-        ).set(max_pos)
+        event_processing_positions.labels("delayed_events").set(max_pos)

         await self._store.update_delayed_events_stream_pos(max_pos)
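
The metrics change above drops the per-server label from `event_processing_positions`. A minimal sketch of the two labelling styles using plain prometheus_client (the metric names here are demo stand-ins, not Synapse's registered metrics):

    from prometheus_client import Gauge

    # Per-server-labelled style: one time series per (name, server_name) pair.
    pos_labelled = Gauge(
        "demo_positions_labelled", "stream position", ["name", "server_name"]
    )
    pos_labelled.labels(name="delayed_events", server_name="example.org").set(1234)

    # Unlabelled-by-server style: one series per stream name only.
    pos = Gauge("demo_positions", "stream position", ["name"])
    pos.labels("delayed_events").set(1234)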

View File

@@ -23,8 +23,6 @@ from typing import TYPE_CHECKING, List, Mapping, Optional, Union
 from synapse import event_auth
 from synapse.api.constants import (
-    CREATOR_POWER_LEVEL,
-    EventContentFields,
     EventTypes,
     JoinRules,
     Membership,
@@ -143,8 +141,6 @@ class EventAuthHandler:
         Raises:
             SynapseError if no appropriate user is found.
         """
-        create_event_id = current_state_ids[(EventTypes.Create, "")]
-        create_event = await self._store.get_event(create_event_id)
         power_level_event_id = current_state_ids.get((EventTypes.PowerLevels, ""))
         invite_level = 0
         users_default_level = 0
@@ -160,28 +156,15 @@ class EventAuthHandler:
         # Find the user with the highest power level (only interested in local
         # users).
-        user_power_level = 0
-        chosen_user = None
         local_users_in_room = await self._store.get_local_users_in_room(room_id)
-        if create_event.room_version.msc4289_creator_power_enabled:
-            creators = set(
-                create_event.content.get(EventContentFields.ADDITIONAL_CREATORS, [])
-            )
-            creators.add(create_event.sender)
-            local_creators = creators.intersection(set(local_users_in_room))
-            if len(local_creators) > 0:
-                chosen_user = local_creators.pop()  # random creator
-                user_power_level = CREATOR_POWER_LEVEL
-        else:
-            chosen_user = max(
-                local_users_in_room,
-                key=lambda user: users.get(user, users_default_level),
-                default=None,
-            )
-            # Return the chosen if they can issue invites.
-            if chosen_user:
-                user_power_level = users.get(chosen_user, users_default_level)
+        chosen_user = max(
+            local_users_in_room,
+            key=lambda user: users.get(user, users_default_level),
+            default=None,
+        )

+        # Return the chosen if they can issue invites.
+        user_power_level = users.get(chosen_user, users_default_level)
         if chosen_user and user_power_level >= invite_level:
             logger.debug(
                 "Found a user who can issue invites %s with power level %d >= invite level %d",

View File

@@ -71,7 +71,6 @@ from synapse.handlers.pagination import PURGE_PAGINATION_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import nested_logging_context
 from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
-from synapse.metrics import SERVER_NAME_LABEL
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.module_api import NOT_SPAM
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
@@ -91,7 +90,7 @@ logger = logging.getLogger(__name__)
 backfill_processing_before_timer = Histogram(
     "synapse_federation_backfill_processing_before_time_seconds",
     "sec",
-    labelnames=[SERVER_NAME_LABEL],
+    [],
     buckets=(
         0.1,
         0.5,
@@ -534,9 +533,9 @@ class FederationHandler:
             # backfill points regardless of `current_depth`.
             if processing_start_time is not None:
                 processing_end_time = self.clock.time_msec()
-                backfill_processing_before_timer.labels(
-                    **{SERVER_NAME_LABEL: self.server_name}
-                ).observe((processing_end_time - processing_start_time) / 1000)
+                backfill_processing_before_timer.observe(
+                    (processing_end_time - processing_start_time) / 1000
+                )

             success = await try_backfill(likely_domains)
             if success:
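
Once the histogram has no label dimensions, the `.labels(...).observe(...)` call above collapses to a plain `observe()`. A standalone sketch with prometheus_client (demo metric name):

    from prometheus_client import Histogram

    timer = Histogram(
        "demo_backfill_processing_before_time_seconds", "sec",
        buckets=(0.1, 0.5, 1.0, 5.0, 10.0),
    )

    processing_start_time, processing_end_time = 1_000, 1_750  # msec
    timer.observe((processing_end_time - processing_start_time) / 1000)  # records 0.75s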

View File

@@ -113,7 +113,7 @@ soft_failed_event_counter = Counter(
 backfill_processing_after_timer = Histogram(
     "synapse_federation_backfill_processing_after_time_seconds",
     "sec",
-    labelnames=[SERVER_NAME_LABEL],
+    [],
     buckets=(
         0.1,
         0.25,
@@ -692,9 +692,7 @@ class FederationEventHandler:
         if not events:
             return

-        with backfill_processing_after_timer.labels(
-            **{SERVER_NAME_LABEL: self.server_name}
-        ).time():
+        with backfill_processing_after_timer.time():
             # if there are any events in the wrong room, the remote server is buggy and
             # should not be trusted.
             for ev in events:
@@ -1728,9 +1726,6 @@ class FederationEventHandler:
                         event,
                         auth_event_id,
                     )
-                    # Drop the event from the auth_map too, else we may incorrectly persist
-                    # events which depend on this dropped event.
-                    auth_map.pop(event.event_id, None)
                     return
                 auth.append(ae)
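
Likewise, `Histogram.time()` works directly as a context manager once there are no labels to resolve; a runnable sketch (demo name):

    import time
    from prometheus_client import Histogram

    after_timer = Histogram("demo_backfill_processing_after_time_seconds", "sec")

    with after_timer.time():  # observes the block's wall-clock duration on exit
        time.sleep(0.01)      # stand-in for processing backfilled events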

View File

@@ -22,7 +22,7 @@
 import logging
 import random
 from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Tuple
+from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple

 from canonicaljson import encode_canonical_json
@@ -55,11 +55,7 @@ from synapse.api.urls import ConsentURIBuilder
 from synapse.event_auth import validate_event_for_room_version
 from synapse.events import EventBase, relation_from_event
 from synapse.events.builder import EventBuilder
-from synapse.events.snapshot import (
-    EventContext,
-    UnpersistedEventContext,
-    UnpersistedEventContextBase,
-)
+from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
 from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
 from synapse.events.validator import EventValidator
 from synapse.handlers.directory import DirectoryHandler
@@ -67,10 +63,10 @@ from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.replication.http.send_event import ReplicationSendEventRestServlet
 from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import (
-    JsonDict,
     PersistedEventPosition,
     Requester,
     RoomAlias,
@@ -505,6 +501,7 @@ class EventCreationHandler:
         self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state

+        self.send_event = ReplicationSendEventRestServlet.make_client(hs)
         self.send_events = ReplicationSendEventsRestServlet.make_client(hs)

         self.request_ratelimiter = hs.get_request_ratelimiter()
@@ -647,14 +644,6 @@ class EventCreationHandler:
         """
         await self.auth_blocking.check_auth_blocking(requester=requester)

-        # The requester may be a regular user, but puppeted by the server.
-        request_by_server = (
-            requester.authenticated_entity == self.hs.config.server.server_name
-        )
-
-        # If the request is initiated by the server, ignore whether the
-        # requester or target is suspended.
-        if not request_by_server:
-            requester_suspended = await self.store.get_user_suspended_status(
-                requester.user.to_string()
-            )
+        requester_suspended = await self.store.get_user_suspended_status(
+            requester.user.to_string()
+        )
@@ -688,10 +677,7 @@ class EventCreationHandler:
                 Codes.USER_ACCOUNT_SUSPENDED,
             )

-        is_create_event = (
-            event_dict["type"] == EventTypes.Create and event_dict["state_key"] == ""
-        )
-        if is_create_event:
+        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
             room_version_id = event_dict["content"]["room_version"]
             maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
             if not maybe_room_version_obj:
@@ -797,7 +783,6 @@ class EventCreationHandler:
         """
         # the only thing the user can do is join the server notices room.
         if builder.type == EventTypes.Member:
-            assert builder.room_id is not None
             membership = builder.content.get("membership", None)
             if membership == Membership.JOIN:
                 return await self.store.is_server_notice_room(builder.room_id)
@@ -1116,9 +1101,6 @@ class EventCreationHandler:
             policy_allowed = await self._policy_handler.is_event_allowed(event)
             if not policy_allowed:
-                # We shouldn't need to set the metadata because the raise should
-                # cause the request to be denied, but just in case:
-                event.internal_metadata.policy_server_spammy = True
                 logger.warning(
                     "Event not allowed by policy server, rejecting %s",
                     event.event_id,
@@ -1263,40 +1245,13 @@ class EventCreationHandler:
                 for_verification=False,
             )

-        if (
-            builder.room_version.msc4291_room_ids_as_hashes
-            and builder.type == EventTypes.Create
-            and builder.is_state()
-        ):
-            if builder.room_id is not None:
-                raise SynapseError(
-                    400,
-                    "Cannot resend m.room.create event",
-                    Codes.INVALID_PARAM,
-                )
-        else:
-            assert builder.room_id is not None
-
         if prev_event_ids is not None:
             assert len(prev_event_ids) <= 10, (
                 "Attempting to create an event with %i prev_events"
                 % (len(prev_event_ids),)
             )
         else:
-            if builder.room_id:
-                prev_event_ids = await self.store.get_prev_events_for_room(
-                    builder.room_id
-                )
-            else:
-                prev_event_ids = []  # can only happen for the create event in MSC4291 rooms
-
-        if builder.type == EventTypes.Create and builder.is_state():
-            if len(prev_event_ids) != 0:
-                raise SynapseError(
-                    400,
-                    "Cannot resend m.room.create event",
-                    Codes.INVALID_PARAM,
-                )
+            prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)

         # We now ought to have some `prev_events` (unless it's a create event).
         #
@@ -1556,98 +1511,6 @@ class EventCreationHandler:
         return result

-    async def create_and_send_new_client_events(
-        self,
-        requester: Requester,
-        room_id: str,
-        prev_event_id: Optional[str],
-        event_dicts: Sequence[JsonDict],
-        ratelimit: bool = True,
-        ignore_shadow_ban: bool = False,
-    ) -> None:
-        """Helper to create and send a batch of new client events.
-
-        This supports sending membership events in very limited circumstances
-        (namely that the event is valid as is and doesn't need federation
-        requests or anything). Callers should prefer to use `update_membership`,
-        which correctly handles membership events in all cases. We allow
-        sending membership events here as its useful when copying e.g. bans
-        between rooms.
-
-        All other events and state events are supported.
-
-        Args:
-            requester: The requester sending the events.
-            room_id: The room ID to send the events in.
-            prev_event_id: The event ID to use as the previous event for the first
-                of the events, must have already been persisted.
-            event_dicts: A sequence of event dictionaries to create and send.
-            ratelimit: Whether to rate limit this send.
-            ignore_shadow_ban: True if shadow-banned users should be allowed to
-                send these events.
-        """
-        if not event_dicts:
-            # Nothing to do.
-            return
-
-        if prev_event_id is None:
-            # Pick the latest forward extremity as the previous event ID.
-            prev_event_ids = await self.store.get_forward_extremities_for_room(room_id)
-            prev_event_ids.sort(key=lambda x: x[2])  # Sort by depth.
-            prev_event_id = prev_event_ids[-1][0]
-
-        state_groups = await self._storage_controllers.state.get_state_group_for_events(
-            [prev_event_id]
-        )
-        if prev_event_id not in state_groups:
-            # This should only happen if we got passed a prev event ID that
-            # hasn't been persisted yet.
-            raise Exception("Previous event ID not found ")
-        current_state_group = state_groups[prev_event_id]
-        state_map = await self._storage_controllers.state.get_state_ids_for_group(
-            current_state_group
-        )
-
-        events_and_contexts_to_send = []
-        state_map = dict(state_map)
-        depth = None
-        for event_dict in event_dicts:
-            event, context = await self.create_event(
-                requester=requester,
-                event_dict=event_dict,
-                prev_event_ids=[prev_event_id],
-                depth=depth,
-                # Take a copy to ensure each event gets a unique copy of
-                # state_map since it is modified below.
-                state_map=dict(state_map),
-                for_batch=True,
-            )
-            events_and_contexts_to_send.append((event, context))
-
-            prev_event_id = event.event_id
-            depth = event.depth + 1
-
-            if event.is_state():
-                # If this is a state event, we need to update the state map
-                # so that it can be used for the next event.
-                state_map[(event.type, event.state_key)] = event.event_id
-
-        datastore = self.hs.get_datastores().state
-        events_and_context = (
-            await UnpersistedEventContext.batch_persist_unpersisted_contexts(
-                events_and_contexts_to_send, room_id, current_state_group, datastore
-            )
-        )
-
-        await self.handle_new_client_event(
-            requester,
-            events_and_context,
-            ignore_shadow_ban=ignore_shadow_ban,
-            ratelimit=ratelimit,
-        )
-
     async def _persist_events(
         self,
         requester: Requester,
@@ -2265,7 +2128,6 @@ class EventCreationHandler:
                     original_event.room_version, third_party_result
                 )
                 self.validator.validate_builder(builder)
-                assert builder.room_id is not None
             except SynapseError as e:
                 raise Exception(
                     "Third party rules module created an invalid event: " + e.msg,

View File

@@ -780,10 +780,10 @@ class PresenceHandler(BasePresenceHandler):
         )

         LaterGauge(
-            name="synapse_handlers_presence_user_to_current_state_size",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {(self.server_name,): len(self.user_to_current_state)},
+            "synapse_handlers_presence_user_to_current_state_size",
+            "",
+            [],
+            lambda: len(self.user_to_current_state),
         )

         # The per-device presence state, maps user to devices to per-device presence state.
@@ -883,10 +883,10 @@ class PresenceHandler(BasePresenceHandler):
         )

         LaterGauge(
-            name="synapse_handlers_presence_wheel_timer_size",
-            desc="",
-            labelnames=[SERVER_NAME_LABEL],
-            caller=lambda: {(self.server_name,): len(self.wheel_timer)},
+            "synapse_handlers_presence_wheel_timer_size",
+            "",
+            [],
+            lambda: len(self.wheel_timer),
         )

         # Used to handle sending of presence to newly joined users/servers
@@ -1568,9 +1568,9 @@ class PresenceHandler(BasePresenceHandler):
         self._event_pos = max_pos

         # Expose current event processing position to prometheus
-        synapse.metrics.event_processing_positions.labels(
-            name="presence", **{SERVER_NAME_LABEL: self.server_name}
-        ).set(max_pos)
+        synapse.metrics.event_processing_positions.labels("presence").set(
+            max_pos
+        )

     async def _handle_state_delta(self, room_id: str, deltas: List[StateDelta]) -> None:
         """Process current state deltas for the room to find new joins that need

View File

@@ -82,7 +82,6 @@ from synapse.types import (
     Requester,
     RoomAlias,
     RoomID,
-    RoomIdWithDomain,
     RoomStreamToken,
     StateMap,
     StrCollection,
@@ -94,9 +93,7 @@ from synapse.types import (
 from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
 from synapse.types.state import StateFilter
 from synapse.util import stringutils
-from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
-from synapse.util.iterutils import batch_iter
 from synapse.util.stringutils import parse_and_validate_server_name
 from synapse.visibility import filter_events_for_client
@@ -197,13 +194,7 @@ class RoomCreationHandler:
         )

     async def upgrade_room(
-        self,
-        requester: Requester,
-        old_room_id: str,
-        new_version: RoomVersion,
-        additional_creators: Optional[List[str]],
-        auto_member: bool = False,
-        ratelimit: bool = True,
+        self, requester: Requester, old_room_id: str, new_version: RoomVersion
     ) -> str:
         """Replace a room with a new room with a different version
@@ -211,9 +202,6 @@ class RoomCreationHandler:
             requester: the user requesting the upgrade
             old_room_id: the id of the room to be replaced
             new_version: the new room version to use
-            additional_creators: additional room creators, for MSC4289.
-            auto_member: Whether to automatically join local users to the new
-                room and send out invites to remote users.

         Returns:
             the new room id
@@ -221,7 +209,6 @@ class RoomCreationHandler:
         Raises:
             ShadowBanError if the requester is shadow-banned.
         """
-        if ratelimit:
-            await self.creation_ratelimiter.ratelimit(requester, update=False)
+        await self.creation_ratelimiter.ratelimit(requester, update=False)

         # then apply the ratelimits
@@ -247,28 +234,7 @@ class RoomCreationHandler:
         old_room = await self.store.get_room(old_room_id)
         if old_room is None:
             raise NotFoundError("Unknown room id %s" % (old_room_id,))
-        old_room_is_public, _ = old_room
-
-        creation_event_with_context = None
-        if new_version.msc4291_room_ids_as_hashes:
-            old_room_create_event = await self.store.get_create_event_for_room(
-                old_room_id
-            )
-            creation_content = self._calculate_upgraded_room_creation_content(
-                old_room_create_event,
-                tombstone_event_id=None,
-                new_room_version=new_version,
-                additional_creators=additional_creators,
-            )
-            creation_event_with_context = await self._generate_create_event_for_room_id(
-                requester,
-                creation_content,
-                old_room_is_public,
-                new_version,
-            )
-            (create_event, _) = creation_event_with_context
-            new_room_id = create_event.room_id
-        else:
-            new_room_id = self._generate_room_id()
+        new_room_id = self._generate_room_id()

         # Try several times, it could fail with PartialStateConflictError
@@ -318,9 +284,6 @@ class RoomCreationHandler:
                     new_version,
                     tombstone_event,
                     tombstone_context,
-                    additional_creators,
-                    creation_event_with_context,
-                    auto_member=auto_member,
                 )

                 return ret
@@ -344,11 +307,6 @@ class RoomCreationHandler:
         new_version: RoomVersion,
         tombstone_event: EventBase,
         tombstone_context: synapse.events.snapshot.EventContext,
-        additional_creators: Optional[List[str]],
-        creation_event_with_context: Optional[
-            Tuple[EventBase, synapse.events.snapshot.EventContext]
-        ] = None,
-        auto_member: bool = False,
     ) -> str:
         """
         Args:
@@ -360,10 +318,6 @@ class RoomCreationHandler:
             new_version: the version to upgrade the room to
             tombstone_event: the tombstone event to send to the old room
             tombstone_context: the context for the tombstone event
-            additional_creators: additional room creators, for MSC4289.
-            creation_event_with_context: The new room's create event, for room IDs as create event IDs.
-            auto_member: Whether to automatically join local users to the new
-                room and send out invites to remote users.

         Raises:
             ShadowBanError if the requester is shadow-banned.
@@ -373,8 +327,6 @@ class RoomCreationHandler:
         logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)

-        # We've already stored the room if we have the create event
-        if not creation_event_with_context:
-            # create the new room. may raise a `StoreError` in the exceedingly unlikely
-            # event of a room ID collision.
-            await self.store.store_room(
+        # create the new room. may raise a `StoreError` in the exceedingly unlikely
+        # event of a room ID collision.
+        await self.store.store_room(
@@ -390,9 +342,6 @@ class RoomCreationHandler:
             new_room_id=new_room_id,
             new_room_version=new_version,
             tombstone_event_id=tombstone_event.event_id,
-            additional_creators=additional_creators,
-            creation_event_with_context=creation_event_with_context,
-            auto_member=auto_member,
         )

         # now send the tombstone
@@ -426,7 +375,6 @@ class RoomCreationHandler:
             old_room_id,
             new_room_id,
             old_room_state,
-            additional_creators,
         )

         return new_room_id
@@ -437,7 +385,6 @@ class RoomCreationHandler:
         old_room_id: str,
         new_room_id: str,
         old_room_state: StateMap[str],
-        additional_creators: Optional[List[str]],
     ) -> None:
         """Send updated power levels in both rooms after an upgrade
@@ -446,7 +393,7 @@ class RoomCreationHandler:
             old_room_id: the id of the room to be replaced
             new_room_id: the id of the replacement room
             old_room_state: the state map for the old room
-            additional_creators: Additional creators in the new room.

         Raises:
             ShadowBanError if the requester is shadow-banned.
         """
@@ -502,14 +449,6 @@ class RoomCreationHandler:
         except AuthError as e:
             logger.warning("Unable to update PLs in old room: %s", e)

-        new_room_version = await self.store.get_room_version(new_room_id)
-        if new_room_version.msc4289_creator_power_enabled:
-            self._remove_creators_from_pl_users_map(
-                old_room_pl_state.content.get("users", {}),
-                requester.user.to_string(),
-                additional_creators,
-            )
-
         await self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -524,36 +463,6 @@ class RoomCreationHandler:
             ratelimit=False,
         )

-    def _calculate_upgraded_room_creation_content(
-        self,
-        old_room_create_event: EventBase,
-        tombstone_event_id: Optional[str],
-        new_room_version: RoomVersion,
-        additional_creators: Optional[List[str]],
-    ) -> JsonDict:
-        creation_content: JsonDict = {
-            "room_version": new_room_version.identifier,
-            "predecessor": {
-                "room_id": old_room_create_event.room_id,
-            },
-        }
-        if tombstone_event_id is not None:
-            creation_content["predecessor"]["event_id"] = tombstone_event_id
-        if (
-            additional_creators is not None
-            and new_room_version.msc4289_creator_power_enabled
-        ):
-            creation_content["additional_creators"] = additional_creators
-
-        # Check if old room was non-federatable
-        if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
-            # If so, mark the new room as non-federatable as well
-            creation_content[EventContentFields.FEDERATE] = False
-
-        # Copy the room type as per MSC3818.
-        room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
-        if room_type is not None:
-            creation_content[EventContentFields.ROOM_TYPE] = room_type
-
-        return creation_content
-
     async def clone_existing_room(
         self,
         requester: Requester,
@@ -561,11 +470,6 @@ class RoomCreationHandler:
         new_room_id: str,
         new_room_version: RoomVersion,
         tombstone_event_id: str,
-        additional_creators: Optional[List[str]],
-        creation_event_with_context: Optional[
-            Tuple[EventBase, synapse.events.snapshot.EventContext]
-        ] = None,
-        auto_member: bool = False,
     ) -> None:
         """Populate a new room based on an old room
@@ -576,27 +480,24 @@ class RoomCreationHandler:
                 created with _generate_room_id())
             new_room_version: the new room version to use
             tombstone_event_id: the ID of the tombstone event in the old room.
-            additional_creators: additional room creators, for MSC4289.
-            creation_event_with_context: The create event of the new room, if the new room supports
-                room ID as create event ID hash.
-            auto_member: Whether to automatically join local users to the new
-                room and send out invites to remote users.
         """
         user_id = requester.user.to_string()

+        creation_content: JsonDict = {
+            "room_version": new_room_version.identifier,
+            "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
+        }
+
+        # Check if old room was non-federatable
+
         # Get old room's create event
         old_room_create_event = await self.store.get_create_event_for_room(old_room_id)

-        if creation_event_with_context:
-            create_event, _ = creation_event_with_context
-            creation_content = create_event.content
-        else:
-            creation_content = self._calculate_upgraded_room_creation_content(
-                old_room_create_event,
-                tombstone_event_id,
-                new_room_version,
-                additional_creators=additional_creators,
-            )
+        # Check if the create event specified a non-federatable room
+        if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
+            # If so, mark the new room as non-federatable as well
+            creation_content[EventContentFields.FEDERATE] = False

         initial_state = {}

         # Replicate relevant room events
@@ -612,8 +513,11 @@ class RoomCreationHandler:
             (EventTypes.PowerLevels, ""),
         ]

+        # Copy the room type as per MSC3818.
         room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
         if room_type is not None:
+            creation_content[EventContentFields.ROOM_TYPE] = room_type

             # If the old room was a space, copy over the rooms in the space.
             if room_type == RoomTypes.SPACE:
                 types_to_copy.append((EventTypes.SpaceChild, None))
@@ -685,14 +589,6 @@ class RoomCreationHandler:
             if current_power_level_int < needed_power_level:
                 user_power_levels[user_id] = needed_power_level

-        if new_room_version.msc4289_creator_power_enabled:
-            # the creator(s) cannot be in the users map
-            self._remove_creators_from_pl_users_map(
-                user_power_levels,
-                user_id,
-                additional_creators,
-            )
-
         # We construct what the body of a call to /createRoom would look like for passing
         # to the spam checker. We don't include a preset here, as we expect the
         # initial state to contain everything we need.
@@ -711,7 +607,7 @@ class RoomCreationHandler:
                 additional_fields=spam_check[1],
             )

-        _, last_event_id, _ = await self._send_events_for_new_room(
+        await self._send_events_for_new_room(
             requester,
             new_room_id,
             new_room_version,
@@ -721,228 +617,36 @@ class RoomCreationHandler:
             invite_list=[],
             initial_state=initial_state,
             creation_content=creation_content,
-            creation_event_with_context=creation_event_with_context,
         )

         # Transfer membership events
-        ban_event_ids = await self.store.get_ban_event_ids_in_room(old_room_id)
-        if ban_event_ids:
-            ban_events = await self.store.get_events_as_list(ban_event_ids)
-
-            # Add any banned users to the new room.
-            #
-            # Note generally we should send membership events via
-            # `update_membership`, however in this case its fine to bypass as
-            # these bans don't need any special treatment, i.e. the sender is in
-            # the room and they don't need any extra signatures, etc.
-            for batched_ban_events in batch_iter(ban_events, 1000):
-                await self.event_creation_handler.create_and_send_new_client_events(
-                    requester=requester,
-                    room_id=new_room_id,
-                    prev_event_id=last_event_id,
-                    event_dicts=[
-                        {
-                            "type": EventTypes.Member,
-                            "state_key": ban_event.state_key,
-                            "room_id": new_room_id,
-                            "sender": requester.user.to_string(),
-                            "content": ban_event.content,
-                        }
-                        for ban_event in batched_ban_events
-                    ],
-                    ratelimit=False,  # We ratelimit the entire upgrade, not individual events.
-                )
-
-        if auto_member:
-            logger.info("Joining local users to %s", new_room_id)
-
-            # 1. Copy over all joins for local
-            joined_profiles = await self.store.get_users_in_room_with_profiles(
-                old_room_id
-            )
-
-            local_user_ids = [
-                user_id for user_id in joined_profiles if self.hs.is_mine_id(user_id)
-            ]
-
-            logger.info("Local user IDs %s", local_user_ids)
-            for batched_local_user_ids in batch_iter(local_user_ids, 1000):
-                invites_to_send = []
-                # For each local user we create an invite event (from the
-                # upgrading user) plus a join event.
-                for local_user_id in batched_local_user_ids:
-                    if local_user_id == user_id:
-                        # Ignore the upgrading user, as they are already in the
-                        # new room.
-                        continue
-
-                    invites_to_send.append(
-                        {
-                            "type": EventTypes.Member,
-                            "state_key": local_user_id,
-                            "room_id": new_room_id,
-                            "sender": requester.user.to_string(),
-                            "content": {
-                                "membership": Membership.INVITE,
-                            },
-                        }
-                    )
-
-                    # If the user has profile information in the previous join,
-                    # add it to the content.
-                    #
-                    # We could instead copy over the contents from the old join
-                    # event, however a) that would require us to fetch all the
-                    # old join events (which is slow), and b) generally the join
-                    # events have no extra information in them. (We also believe
-                    # that most clients don't copy this information over either,
-                    # but we could be wrong.)
-                    content_profile = {}
-                    user_profile = joined_profiles[local_user_id]
-                    if user_profile.display_name:
-                        content_profile["displayname"] = user_profile.display_name
-                    if user_profile.avatar_url:
-                        content_profile["avatar_url"] = user_profile.avatar_url
-
-                    invites_to_send.append(
-                        {
-                            "type": EventTypes.Member,
-                            "state_key": local_user_id,
-                            "room_id": new_room_id,
-                            "sender": local_user_id,
-                            "content": {
-                                "membership": Membership.JOIN,
-                                **content_profile,
-                            },
-                        }
-                    )
-
-                await self.event_creation_handler.create_and_send_new_client_events(
-                    requester=requester,
-                    room_id=new_room_id,
-                    prev_event_id=None,
-                    event_dicts=invites_to_send,
-                    ratelimit=False,  # We ratelimit the entire upgrade, not individual events.
-                )
-
-            # Invite other users if the room is not public. If the room *is*
-            # public then users can simply directly join, and inviting them as
-            # well may lead to confusion.
-            join_rule_content = initial_state.get((EventTypes.JoinRules, ""), None)
-            is_public = False
-            if join_rule_content:
-                is_public = join_rule_content["join_rule"] == JoinRules.PUBLIC
-
-            if not is_public:
-                # Copy invites
-                # TODO: Copy over 3pid invites as well.
-                invited_users = await self.store.get_invited_users_in_room(
-                    room_id=old_room_id
-                )
-
-                # For local users we can just batch send the invites.
-                local_invited_users = [
-                    user_id for user_id in invited_users if self.hs.is_mine_id(user_id)
-                ]
-                logger.info(
-                    "Joining local user IDs %s to new room %s",
-                    local_invited_users,
-                    new_room_id,
-                )
-                for batched_local_invited_users in batch_iter(
-                    local_invited_users, 1000
-                ):
-                    invites_to_send = []
-                    leaves_to_send = []
-                    # For each local user we create an invite event (from the
-                    # upgrading user), and reject the invite event in the old
-                    # room.
-                    #
-                    # This ensures that the user ends up with a single invite to
-                    # the new room (rather than multiple invites which may be
-                    # noisy and confusing).
-                    for local_user_id in batched_local_invited_users:
-                        leaves_to_send.append(
-                            {
-                                "type": EventTypes.Member,
-                                "state_key": local_user_id,
-                                "room_id": old_room_id,
-                                "sender": local_user_id,
-                                "content": {
-                                    "membership": Membership.LEAVE,
-                                },
-                            }
-                        )
-                        invites_to_send.append(
-                            {
-                                "type": EventTypes.Member,
-                                "state_key": local_user_id,
-                                "room_id": new_room_id,
-                                "sender": requester.user.to_string(),
-                                "content": {
-                                    "membership": Membership.INVITE,
-                                },
-                            }
-                        )
-
-                    await self.event_creation_handler.create_and_send_new_client_events(
-                        requester=requester,
-                        room_id=old_room_id,
-                        prev_event_id=None,
-                        event_dicts=leaves_to_send,
-                        ratelimit=False,  # We ratelimit the entire upgrade, not individual events.
-                    )
-                    await self.event_creation_handler.create_and_send_new_client_events(
-                        requester=requester,
-                        room_id=new_room_id,
-                        prev_event_id=None,
-                        event_dicts=invites_to_send,
-                        ratelimit=False,
-                    )
-
-                # For remote users we send invites one by one, as we need to
-                # send each one to the remote server.
-                #
-                # We also invite joined remote users who were in the old room.
-                remote_user_ids = [
-                    user_id
-                    for user_id in itertools.chain(invited_users, joined_profiles)
-                    if not self.hs.is_mine_id(user_id)
-                ]
-                logger.debug("Inviting remote user IDs %s", remote_user_ids)
-
-                async def remote_invite(remote_user: str) -> None:
-                    try:
-                        await self.room_member_handler.update_membership(
-                            requester,
-                            UserID.from_string(remote_user),
-                            new_room_id,
-                            Membership.INVITE,
-                            ratelimit=False,  # We ratelimit the entire upgrade, not individual events.
-                        )
-                    except SynapseError as e:
-                        # If we fail to invite a remote user, we log it but continue
-                        # on with the upgrade.
-                        logger.warning(
-                            "Failed to invite remote user %s to new room %s: %s",
-                            remote_user,
-                            new_room_id,
-                            e,
-                        )
-
-                # We do this concurrently, as it can take a while to invite
-                await concurrently_execute(
-                    remote_invite,
-                    remote_user_ids,
-                    10,
-                )
+        old_room_member_state_ids = (
+            await self._storage_controllers.state.get_current_state_ids(
+                old_room_id, StateFilter.from_types([(EventTypes.Member, None)])
+            )
+        )
+
+        # map from event_id to BaseEvent
+        old_room_member_state_events = await self.store.get_events(
+            old_room_member_state_ids.values()
+        )
+        for old_event in old_room_member_state_events.values():
+            # Only transfer ban events
+            if (
+                "membership" in old_event.content
+                and old_event.content["membership"] == "ban"
+            ):
+                await self.room_member_handler.update_membership(
+                    requester,
+                    UserID.from_string(old_event.state_key),
+                    new_room_id,
+                    "ban",
+                    ratelimit=False,
+                    content=old_event.content,
+                )
+
+        # XXX invites/joins
+        # XXX 3pid invites

     async def _move_aliases_to_new_room(
         self,
@@ -1071,25 +775,6 @@ class RoomCreationHandler:
         await self.auth_blocking.check_auth_blocking(requester=requester)

-        if ratelimit:
-            # Limit the rate of room creations,
-            # using both the limiter specific to room creations as well
-            # as the general request ratelimiter.
-            #
-            # Note that we don't rate limit the individual
-            # events in the room — room creation isn't atomic and
-            # historically it was very janky if half the events in the
-            # initial state don't make it because of rate limiting.
-
-            # First check the room creation ratelimiter without updating it
-            # (this is so we don't consume a token if the other ratelimiter doesn't
-            # allow us to proceed)
-            await self.creation_ratelimiter.ratelimit(requester, update=False)
-            # then apply the ratelimits
-            await self.common_request_ratelimiter.ratelimit(requester)
-            await self.creation_ratelimiter.ratelimit(requester)
-
         if (
             self._server_notices_mxid is not None
             and user_id == self._server_notices_mxid
@@ -1121,6 +806,37 @@ class RoomCreationHandler:
                     Codes.MISSING_PARAM,
                 )

+        if not is_requester_admin:
+            spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
+                user_id, config
+            )
+            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
+                raise SynapseError(
+                    403,
+                    "You are not permitted to create rooms",
+                    errcode=spam_check[0],
+                    additional_fields=spam_check[1],
+                )
+
+        if ratelimit:
+            # Limit the rate of room creations,
+            # using both the limiter specific to room creations as well
+            # as the general request ratelimiter.
+            #
+            # Note that we don't rate limit the individual
+            # events in the room — room creation isn't atomic and
+            # historically it was very janky if half the events in the
+            # initial state don't make it because of rate limiting.
+
+            # First check the room creation ratelimiter without updating it
+            # (this is so we don't consume a token if the other ratelimiter doesn't
+            # allow us to proceed)
+            await self.creation_ratelimiter.ratelimit(requester, update=False)
+            # then apply the ratelimits
+            await self.common_request_ratelimiter.ratelimit(requester)
+            await self.creation_ratelimiter.ratelimit(requester)
+
         room_version_id = config.get(
             "room_version", self.config.server.default_room_version.identifier
         )
@@ -1196,7 +912,6 @@ class RoomCreationHandler:
         power_level_content_override = config.get("power_level_content_override")
         if (
             power_level_content_override
-            and not room_version.msc4289_creator_power_enabled  # this validation doesn't apply in MSC4289 rooms
             and "users" in power_level_content_override
             and user_id not in power_level_content_override["users"]
         ):
@@ -1213,49 +928,6 @@ class RoomCreationHandler:
         self._validate_room_config(config, visibility)

-        # Run the spam checker after other validation
-        if not is_requester_admin:
-            spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
-                user_id, config
-            )
-            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
-                raise SynapseError(
-                    403,
-                    "You are not permitted to create rooms",
-                    errcode=spam_check[0],
-                    additional_fields=spam_check[1],
-                )
-
-        creation_content = config.get("creation_content", {})
-        # override any attempt to set room versions via the creation_content
-        creation_content["room_version"] = room_version.identifier
-
-        # trusted private chats have the invited users marked as additional creators
-        if (
-            room_version.msc4289_creator_power_enabled
-            and config.get("preset", None) == RoomCreationPreset.TRUSTED_PRIVATE_CHAT
-            and len(config.get("invite", [])) > 0
-        ):
-            # the other user(s) are additional creators
-            invitees = config.get("invite", [])
-            # we don't want to replace any additional_creators additionally specified, and we want
-            # to remove duplicates.
-            creation_content[EventContentFields.ADDITIONAL_CREATORS] = list(
-                set(creation_content.get(EventContentFields.ADDITIONAL_CREATORS, []))
-                | set(invitees)
-            )
-
-        creation_event_with_context = None
-        if room_version.msc4291_room_ids_as_hashes:
-            creation_event_with_context = await self._generate_create_event_for_room_id(
-                requester,
-                creation_content,
-                is_public,
-                room_version,
-            )
-            (create_event, _) = creation_event_with_context
-            room_id = create_event.room_id
-        else:
-            room_id = await self._generate_and_create_room_id(
-                creator_id=user_id,
-                is_public=is_public,
+        room_id = await self._generate_and_create_room_id(
+            creator_id=user_id,
+            is_public=is_public,
@@ -1297,6 +969,11 @@ class RoomCreationHandler:
         for val in raw_initial_state:
             initial_state[(val["type"], val.get("state_key", ""))] = val["content"]

+        creation_content = config.get("creation_content", {})
+        # override any attempt to set room versions via the creation_content
+        creation_content["room_version"] = room_version.identifier
+
         (
             last_stream_id,
             last_sent_event_id,
@@ -1313,7 +990,6 @@ class RoomCreationHandler:
             power_level_content_override=power_level_content_override,
             creator_join_profile=creator_join_profile,
             ignore_forced_encryption=ignore_forced_encryption,
-            creation_event_with_context=creation_event_with_context,
         )

         # we avoid dropping the lock between invites, as otherwise joins can
@@ -1379,38 +1055,6 @@ class RoomCreationHandler:
         return room_id, room_alias, last_stream_id

-    async def _generate_create_event_for_room_id(
-        self,
-        creator: Requester,
-        creation_content: JsonDict,
-        is_public: bool,
-        room_version: RoomVersion,
-    ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
-        (
-            creation_event,
-            new_unpersisted_context,
-        ) = await self.event_creation_handler.create_event(
-            creator,
-            {
-                "content": creation_content,
-                "sender": creator.user.to_string(),
-                "type": EventTypes.Create,
-                "state_key": "",
-            },
-            prev_event_ids=[],
-            depth=1,
-            state_map={},
-            for_batch=False,
-        )
-        await self.store.store_room(
-            room_id=creation_event.room_id,
-            room_creator_user_id=creator.user.to_string(),
-            is_public=is_public,
-            room_version=room_version,
-        )
-        creation_context = await new_unpersisted_context.persist(creation_event)
-        return (creation_event, creation_context)
-
     async def _send_events_for_new_room(
         self,
         creator: Requester,
@@ -1424,9 +1068,6 @@ class RoomCreationHandler:
         power_level_content_override: Optional[JsonDict] = None,
         creator_join_profile: Optional[JsonDict] = None,
         ignore_forced_encryption: bool = False,
-        creation_event_with_context: Optional[
-            Tuple[EventBase, synapse.events.snapshot.EventContext]
-        ] = None,
     ) -> Tuple[int, str, int]:
         """Sends the initial events into a new room. Sends the room creation, membership,
         and power level events into the room sequentially, then creates and batches up the
@@ -1463,10 +1104,7 @@ class RoomCreationHandler:
                 user in this room.
             ignore_forced_encryption:
                 Ignore encryption forced by `encryption_enabled_by_default_for_room_type` setting.
-            creation_event_with_context:
-                Set in MSC4291 rooms where the create event determines the room ID. If provided,
-                does not create an additional create event but instead appends the remaining new
-                events onto the provided create event.

         Returns:
             A tuple containing the stream ID, event ID and depth of the last
             event sent to the room.
@@ -1531,26 +1169,13 @@ class RoomCreationHandler:
         preset_config, config = self._room_preset_config(room_config)

-        if creation_event_with_context is None:
-            # MSC2175 removes the creator field from the create event.
-            if not room_version.implicit_room_creator:
-                creation_content["creator"] = creator_id
-            creation_event, unpersisted_creation_context = await create_event(
-                EventTypes.Create, creation_content, False
-            )
-            creation_context = await unpersisted_creation_context.persist(
-                creation_event
-            )
-        else:
-            (creation_event, creation_context) = creation_event_with_context
-            # we had to do the above already in order to have a room ID, so just updates local vars
-            # and continue.
-            depth = 2
-            prev_event = [creation_event.event_id]
-            state_map[(creation_event.type, creation_event.state_key)] = (
-                creation_event.event_id
-            )
+        # MSC2175 removes the creator field from the create event.
+        if not room_version.implicit_room_creator:
+            creation_content["creator"] = creator_id
+        creation_event, unpersisted_creation_context = await create_event(
+            EventTypes.Create, creation_content, False
+        )
+        creation_context = await unpersisted_creation_context.persist(creation_event)

         logger.debug("Sending %s in new room", EventTypes.Member)
         ev = await self.event_creation_handler.handle_new_client_event(
             requester=creator,
@@ -1599,9 +1224,7 @@ class RoomCreationHandler:
         # Please update the docs for `default_power_level_content_override` when
         # updating the `events` dict below
         power_level_content: JsonDict = {
-            "users": {creator_id: 100}
-            if not room_version.msc4289_creator_power_enabled
-            else {},
+            "users": {creator_id: 100},
             "users_default": 0,
             "events": {
                 EventTypes.Name: 50,
@@ -1609,9 +1232,7 @@ class RoomCreationHandler:
                 EventTypes.RoomHistoryVisibility: 100,
                 EventTypes.CanonicalAlias: 50,
                 EventTypes.RoomAvatar: 50,
-                EventTypes.Tombstone: 150
-                if room_version.msc4289_creator_power_enabled
-                else 100,
+                EventTypes.Tombstone: 100,
                 EventTypes.ServerACL: 100,
                 EventTypes.RoomEncryption: 100,
             },
@@ -1624,13 +1245,7 @@ class RoomCreationHandler:
             "historical": 100,
         }

-        # original_invitees_have_ops is set on preset:trusted_private_chat which will already
-        # have set these users as additional_creators, hence don't set the PL for creators as
-        # that is invalid.
-        if (
-            config["original_invitees_have_ops"]
-            and not room_version.msc4289_creator_power_enabled
-        ):
+        if config["original_invitees_have_ops"]:
             for invitee in invite_list:
                 power_level_content["users"][invitee] = 100
@@ -1803,19 +1418,6 @@ class RoomCreationHandler:
         )
         return preset_name, preset_config

-    def _remove_creators_from_pl_users_map(
-        self,
-        users_map: Dict[str, int],
-        creator: str,
-        additional_creators: Optional[List[str]],
-    ) -> None:
-        creators = [creator]
-        if additional_creators:
-            creators.extend(additional_creators)
-        for creator in creators:
-            # the creator(s) cannot be in the users map
-            users_map.pop(creator, None)
-
     def _generate_room_id(self) -> str:
         """Generates a random room ID.
@@ -1833,7 +1435,7 @@ class RoomCreationHandler:
             A random room ID of the form "!opaque_id:domain".
         """
         random_string = stringutils.random_string(18)
-        return RoomIdWithDomain(random_string, self.hs.hostname).to_string()
+        return RoomID(random_string, self.hs.hostname).to_string()

     async def _generate_and_create_room_id(
         self,
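
For reference, the classic generator the diff returns to builds IDs of the form `!opaque_id:domain`. A toy version under the assumption that `stringutils.random_string` draws 18 ASCII letters, as the hunk suggests:

    import random
    import string

    def generate_room_id(domain: str) -> str:
        opaque = "".join(random.choices(string.ascii_letters, k=18))
        return f"!{opaque}:{domain}"

    print(generate_room_id("example.org"))  # e.g. !kDZhRVnerIuQGYWMuC:example.org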

View File

@@ -42,14 +42,14 @@ from synapse.api.errors import (
 )
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.event_auth import get_named_level, get_power_level_event
-from synapse.events import EventBase, is_creator
+from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME
 from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
 from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
 from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
-from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
+from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.push import ReplicationCopyPusherRestServlet
 from synapse.storage.databases.main.state_deltas import StateDelta
@@ -746,12 +746,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             and requester.user.to_string() == self._server_notices_mxid
         )

-        # The requester may be a regular user, but puppeted by the server.
-        request_by_server = requester.authenticated_entity == self._server_name
-
-        # If the request is initiated by the server, ignore whether the
-        # requester or target is suspended.
-        if not request_by_server:
-            requester_suspended = await self.store.get_user_suspended_status(
-                requester.user.to_string()
-            )
+        requester_suspended = await self.store.get_user_suspended_status(
+            requester.user.to_string()
+        )
@@ -1160,8 +1154,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         elif effective_membership_state == Membership.KNOCK:
             if not is_host_in_room:
-                # we used to add the domain of the room ID to remote_room_hosts.
-                # This is not safe in MSC4291 rooms which do not have a domain.
+                # The knock needs to be sent over federation instead
+                remote_room_hosts.append(get_domain_from_id(room_id))

                 content["membership"] = Membership.KNOCK

                 try:
@@ -1920,7 +1915,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             check_complexity
             and self.hs.config.server.limit_remote_rooms.admins_can_join
         ):
-            check_complexity = not await self.store.is_server_admin(user.to_string())
+            check_complexity = not await self.store.is_server_admin(user)

         if check_complexity:
             # Fetch the room complexity
@@ -2260,9 +2255,7 @@ class RoomForgetterHandler(StateDeltasHandler):
             self.pos = max_pos

             # Expose current event processing position to prometheus
-            event_processing_positions.labels(
-                name="room_forgetter", **{SERVER_NAME_LABEL: self.server_name}
-            ).set(max_pos)
+            event_processing_positions.labels("room_forgetter").set(max_pos)

             await self._store.update_room_forgetter_stream_pos(max_pos)
@@ -2323,7 +2316,6 @@ def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[s
     # Check which members are able to invite by ensuring they're joined and have
     # the necessary power level.
-    create_event = auth_events[(EventTypes.Create, "")]
     for (event_type, state_key), event in auth_events.items():
         if event_type != EventTypes.Member:
             continue
@@ -2331,12 +2323,8 @@ def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[s
         if event.membership != Membership.JOIN:
             continue

-        if create_event.room_version.msc4289_creator_power_enabled and is_creator(
-            create_event, state_key
-        ):
-            result.append(state_key)
         # Check if the user has a custom power level.
-        elif users.get(state_key, users_default_level) >= invite_level:
+        if users.get(state_key, users_default_level) >= invite_level:
             result.append(state_key)

     return result

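The last hunk above alters `get_users_which_can_issue_invite`: on one side of the diff, rooms whose version enables MSC4289 creator power treat room creators as able to issue invites even without an explicit power-level entry. A minimal sketch of that selection logic, using plain dicts in place of Synapse's `StateMap` and auth-event helpers (all names below are illustrative, not Synapse's):

```python
from typing import Dict, List

def users_which_can_issue_invite(
    memberships: Dict[str, str],   # user_id -> membership ("join", "leave", ...)
    users_power: Dict[str, int],   # explicit levels from m.room.power_levels
    users_default_level: int,
    invite_level: int,
    creators: List[str],           # room creators (relevant under MSC4289)
    creator_power_enabled: bool,
) -> List[str]:
    result = []
    for user_id, membership in memberships.items():
        # Only joined members can issue invites.
        if membership != "join":
            continue
        # Under MSC4289, creators can always invite, whatever their level.
        if creator_power_enabled and user_id in creators:
            result.append(user_id)
        # Otherwise fall back to the explicit or default power level.
        elif users_power.get(user_id, users_default_level) >= invite_level:
            result.append(user_id)
    return result

print(users_which_can_issue_invite(
    {"@creator:hs": "join", "@mod:hs": "join", "@user:hs": "join"},
    {"@mod:hs": 50}, 0, 50, ["@creator:hs"], True,
))  # ['@creator:hs', '@mod:hs']
```

The creator check deliberately comes first, so a creator with an explicitly low power level still qualifies.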
View File

@@ -24,13 +24,16 @@ import logging
from email.mime.multipart import MIMEMultipart from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText from email.mime.text import MIMEText
from io import BytesIO from io import BytesIO
from typing import TYPE_CHECKING, Dict, Optional from typing import TYPE_CHECKING, Any, Dict, Optional
from pkg_resources import parse_version
import twisted
from twisted.internet.defer import Deferred from twisted.internet.defer import Deferred
from twisted.internet.endpoints import HostnameEndpoint from twisted.internet.endpoints import HostnameEndpoint
from twisted.internet.interfaces import IProtocolFactory from twisted.internet.interfaces import IOpenSSLContextFactory, IProtocolFactory
from twisted.internet.ssl import optionsForClientTLS from twisted.internet.ssl import optionsForClientTLS
from twisted.mail.smtp import ESMTPSenderFactory from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory
from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.protocols.tls import TLSMemoryBIOFactory
from synapse.logging.context import make_deferred_yieldable from synapse.logging.context import make_deferred_yieldable
@@ -41,6 +44,49 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
_is_old_twisted = parse_version(twisted.__version__) < parse_version("21")
class _BackportESMTPSender(ESMTPSender):
"""Extend old versions of ESMTPSender to configure TLS.
Unfortunately, before Twisted 21.2, ESMTPSender doesn't give an easy way to
disable TLS, or to configure the hostname used for TLS certificate validation.
This backports the `hostname` parameter for that functionality.
"""
__hostname: Optional[str]
def __init__(self, *args: Any, **kwargs: Any) -> None:
""""""
self.__hostname = kwargs.pop("hostname", None)
super().__init__(*args, **kwargs)
def _getContextFactory(self) -> Optional[IOpenSSLContextFactory]:
if self.context is not None:
return self.context
elif self.__hostname is None:
return None # disable TLS if hostname is None
return optionsForClientTLS(self.__hostname)
class _BackportESMTPSenderFactory(ESMTPSenderFactory):
"""An ESMTPSenderFactory for _BackportESMTPSender.
This backports the `hostname` parameter, to disable or configure TLS.
"""
__hostname: Optional[str]
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.__hostname = kwargs.pop("hostname", None)
super().__init__(*args, **kwargs)
def protocol(self, *args: Any, **kwargs: Any) -> ESMTPSender: # type: ignore
# this overrides ESMTPSenderFactory's `protocol` attribute, with a Callable
# instantiating our _BackportESMTPSender, providing the hostname parameter
return _BackportESMTPSender(*args, **kwargs, hostname=self.__hostname)
async def _sendmail( async def _sendmail(
reactor: ISynapseReactor, reactor: ISynapseReactor,
@@ -83,7 +129,9 @@ async def _sendmail(
elif tlsname is None: elif tlsname is None:
tlsname = smtphost tlsname = smtphost
factory: IProtocolFactory = ESMTPSenderFactory( factory: IProtocolFactory = (
_BackportESMTPSenderFactory if _is_old_twisted else ESMTPSenderFactory
)(
username, username,
password, password,
from_addr, from_addr,

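The factory selection above is a compact version-gate: `_is_old_twisted` is computed once at import time, and `(_BackportESMTPSenderFactory if _is_old_twisted else ESMTPSenderFactory)(...)` picks a class and immediately calls it as a constructor. A toy sketch of the pattern, with a stand-in base class since pulling in `twisted.mail` here would obscure the point (`pkg_resources.parse_version` mirrors the diff; new code would more likely use `packaging.version.parse`):

```python
from pkg_resources import parse_version

import twisted

# Computed once at import time, as in the hunk above.
_is_old_twisted = parse_version(twisted.__version__) < parse_version("21")

class SenderFactory:  # stand-in for twisted.mail.smtp.ESMTPSenderFactory
    def __init__(self, username: bytes, password: bytes) -> None:
        self.username = username
        self.password = password

class BackportSenderFactory(SenderFactory):
    """Accepts the extra `hostname` keyword the old base class lacks."""

    def __init__(self, *args, hostname=None, **kwargs) -> None:
        self.hostname = hostname  # pop the backported kwarg before delegating
        super().__init__(*args, **kwargs)

# Select the class once, then call it like any constructor.
factory = (BackportSenderFactory if _is_old_twisted else SenderFactory)(
    b"user", b"secret"
)
```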
View File

@@ -38,7 +38,6 @@ from synapse.logging.opentracing import (
tag_args, tag_args,
trace, trace,
) )
from synapse.metrics import SERVER_NAME_LABEL
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.databases.main.stream import PaginateFunction from synapse.storage.databases.main.stream import PaginateFunction
@@ -80,7 +79,7 @@ logger = logging.getLogger(__name__)
sync_processing_time = Histogram( sync_processing_time = Histogram(
"synapse_sliding_sync_processing_time", "synapse_sliding_sync_processing_time",
"Time taken to generate a sliding sync response, ignoring wait times.", "Time taken to generate a sliding sync response, ignoring wait times.",
labelnames=["initial", SERVER_NAME_LABEL], ["initial"],
) )
# Limit the number of state_keys we should remember sending down the connection for each # Limit the number of state_keys we should remember sending down the connection for each
@@ -95,7 +94,6 @@ MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100
class SlidingSyncHandler: class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"): def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.clock = hs.get_clock() self.clock = hs.get_clock()
self.store = hs.get_datastores().main self.store = hs.get_datastores().main
self.storage_controllers = hs.get_storage_controllers() self.storage_controllers = hs.get_storage_controllers()
@@ -116,7 +114,7 @@ class SlidingSyncHandler:
sync_config: SlidingSyncConfig, sync_config: SlidingSyncConfig,
from_token: Optional[SlidingSyncStreamToken] = None, from_token: Optional[SlidingSyncStreamToken] = None,
timeout_ms: int = 0, timeout_ms: int = 0,
) -> Tuple[SlidingSyncResult, bool]: ) -> SlidingSyncResult:
""" """
Get the sync for a client if we have new data for it now. Otherwise Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then wait for new data to arrive on the server. If the timeout expires, then
@@ -128,16 +126,9 @@ class SlidingSyncHandler:
from_token: The point in the stream to sync from. Token of the end of the from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request. previous batch. May be `None` if this is the initial sync request.
timeout_ms: The time in milliseconds to wait for new data to arrive. If 0, timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
we will respond immediately but there might not be any new data so we just we will immediately but there might not be any new data so we just return an
return an empty response. empty response.
Returns:
A tuple containing the `SlidingSyncResult` and whether we waited for new
activity before responding. Knowing whether we waited is useful in traces
to filter out long-running requests where we were just waiting.
""" """
did_wait = False
# If the user is not part of the mau group, then check that limits have # If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain # not been exceeded (if not part of the group by this point, almost certain
# auth_blocking will occur) # auth_blocking will occur)
@@ -156,7 +147,7 @@ class SlidingSyncHandler:
logger.warning( logger.warning(
"Timed out waiting for worker to catch up. Returning empty response" "Timed out waiting for worker to catch up. Returning empty response"
) )
return SlidingSyncResult.empty(from_token), did_wait return SlidingSyncResult.empty(from_token)
# If we've spent significant time waiting to catch up, take it off # If we've spent significant time waiting to catch up, take it off
# the timeout. # the timeout.
@@ -192,9 +183,8 @@ class SlidingSyncHandler:
current_sync_callback, current_sync_callback,
from_token=from_token.stream_token, from_token=from_token.stream_token,
) )
did_wait = True
return result, did_wait return result
@trace @trace
async def current_sync_for_user( async def current_sync_for_user(
@@ -378,9 +368,9 @@ class SlidingSyncHandler:
set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id) set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id)
end_time_s = self.clock.time() end_time_s = self.clock.time()
sync_processing_time.labels( sync_processing_time.labels(from_token is not None).observe(
initial=from_token is not None, **{SERVER_NAME_LABEL: self.server_name} end_time_s - start_time_s
).observe(end_time_s - start_time_s) )
return sliding_sync_result return sliding_sync_result

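Two mechanical changes run through these hunks: one side returns a `(SlidingSyncResult, did_wait)` tuple so traces can distinguish genuine work from requests that merely waited for activity, and the same side labels the processing-time histogram per homeserver. A rough `prometheus_client` sketch of the timing pattern, with a local `SERVER_NAME_LABEL` standing in for the constant from `synapse.metrics`:

```python
import time

from prometheus_client import Histogram

SERVER_NAME_LABEL = "server_name"  # assumption: mirrors synapse.metrics

sync_processing_time = Histogram(
    "synapse_sliding_sync_processing_time",
    "Time taken to generate a sliding sync response, ignoring wait times.",
    labelnames=["initial", SERVER_NAME_LABEL],
)

def timed_sync(server_name: str, from_token, compute):
    start_time_s = time.monotonic()
    result = compute()
    end_time_s = time.monotonic()
    # Keyword label values let the extra label name stay a shared constant,
    # spliced in with **.
    sync_processing_time.labels(
        initial=from_token is not None, **{SERVER_NAME_LABEL: server_name}
    ).observe(end_time_s - start_time_s)
    return result

print(timed_sync("example.org", None, lambda: "result"))
```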
View File

@@ -32,7 +32,7 @@ from typing import (
) )
from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import JsonDict from synapse.types import JsonDict
@@ -147,9 +147,7 @@ class StatsHandler:
logger.debug("Handled room stats to %s -> %s", self.pos, max_pos) logger.debug("Handled room stats to %s -> %s", self.pos, max_pos)
event_processing_positions.labels( event_processing_positions.labels("stats").set(max_pos)
name="stats", **{SERVER_NAME_LABEL: self.server_name}
).set(max_pos)
self.pos = max_pos self.pos = max_pos

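The same one-line change recurs here, in `RoomForgetterHandler` above, and in the user-directory handler below: on one side every component writes its stream position to a `(name, server_name)`-labelled series, on the other to a `name`-only series. A hedged sketch of the shared-gauge pattern:

```python
from prometheus_client import Gauge

SERVER_NAME_LABEL = "server_name"  # assumption: stand-in for synapse.metrics

event_processing_positions = Gauge(
    "synapse_event_processing_positions",
    "Where components have processed to in the event stream",
    labelnames=["name", SERVER_NAME_LABEL],
)

def advance_position(component: str, server_name: str, max_pos: int) -> None:
    # One series per (component, homeserver) pair, so several homeservers
    # can share a process without clobbering each other's positions.
    event_processing_positions.labels(
        name=component, **{SERVER_NAME_LABEL: server_name}
    ).set(max_pos)

advance_position("stats", "example.org", 1234)
```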
View File

@@ -1,15 +1,9 @@
import logging import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional from typing import TYPE_CHECKING, Optional
from synapse.api.constants import RelationTypes from synapse.api.errors import AuthError, NotFoundError
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.storage.databases.main.thread_subscriptions import ThreadSubscription
from synapse.events import relation_from_event from synapse.types import UserID
from synapse.storage.databases.main.thread_subscriptions import (
AutomaticSubscriptionConflicted,
ThreadSubscription,
)
from synapse.types import EventOrderings, UserID
if TYPE_CHECKING: if TYPE_CHECKING:
from synapse.server import HomeServer from synapse.server import HomeServer
@@ -61,79 +55,42 @@ class ThreadSubscriptionsHandler:
room_id: str, room_id: str,
thread_root_event_id: str, thread_root_event_id: str,
*, *,
automatic_event_id: Optional[str], automatic: bool,
) -> Optional[int]: ) -> Optional[int]:
"""Sets or updates a user's subscription settings for a specific thread root. """Sets or updates a user's subscription settings for a specific thread root.
Args: Args:
requester_user_id: The ID of the user whose settings are being updated. requester_user_id: The ID of the user whose settings are being updated.
thread_root_event_id: The event ID of the thread root. thread_root_event_id: The event ID of the thread root.
automatic_event_id: if the user was subscribed by an automatic decision by automatic: whether the user was subscribed by an automatic decision by
their client, the event ID that caused this. their client.
Returns: Returns:
The stream ID for this update, if the update isn't no-opped. The stream ID for this update, if the update isn't no-opped.
Raises: Raises:
NotFoundError if the user cannot access the thread root event, or it isn't NotFoundError if the user cannot access the thread root event, or it isn't
known to this homeserver. Ditto for the automatic cause event if supplied. known to this homeserver.
SynapseError(400, M_NOT_IN_THREAD): if client supplied an automatic cause event
but user cannot access the event.
SynapseError(409, M_SKIPPED): if client requested an automatic subscription
but it was skipped because the cause event is logically later than an unsubscription.
""" """
# First check that the user can access the thread root event # First check that the user can access the thread root event
# and that it exists # and that it exists
try: try:
thread_root_event = await self.event_handler.get_event( event = await self.event_handler.get_event(
user_id, room_id, thread_root_event_id user_id, room_id, thread_root_event_id
) )
if thread_root_event is None: if event is None:
raise NotFoundError("No such thread root") raise NotFoundError("No such thread root")
except AuthError: except AuthError:
logger.info("rejecting thread subscriptions change (thread not accessible)") logger.info("rejecting thread subscriptions change (thread not accessible)")
raise NotFoundError("No such thread root") raise NotFoundError("No such thread root")
if automatic_event_id: return await self.store.subscribe_user_to_thread(
autosub_cause_event = await self.event_handler.get_event(
user_id, room_id, automatic_event_id
)
if autosub_cause_event is None:
raise NotFoundError("Automatic subscription event not found")
relation = relation_from_event(autosub_cause_event)
if (
relation is None
or relation.rel_type != RelationTypes.THREAD
or relation.parent_id != thread_root_event_id
):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Automatic subscription must use an event in the thread",
errcode=Codes.MSC4306_NOT_IN_THREAD,
)
automatic_event_orderings = EventOrderings.from_event(autosub_cause_event)
else:
automatic_event_orderings = None
outcome = await self.store.subscribe_user_to_thread(
user_id.to_string(), user_id.to_string(),
room_id, event.room_id,
thread_root_event_id, thread_root_event_id,
automatic_event_orderings=automatic_event_orderings, automatic=automatic,
) )
if isinstance(outcome, AutomaticSubscriptionConflicted):
raise SynapseError(
HTTPStatus.CONFLICT,
"Automatic subscription obsoleted by an unsubscription request.",
errcode=Codes.MSC4306_CONFLICTING_UNSUBSCRIPTION,
)
return outcome
async def unsubscribe_user_from_thread( async def unsubscribe_user_from_thread(
self, user_id: UserID, room_id: str, thread_root_event_id: str self, user_id: UserID, room_id: str, thread_root_event_id: str
) -> Optional[int]: ) -> Optional[int]:

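The fuller side of this hunk validates an automatic subscription in two steps: fetch the claimed cause event, then check that it carries an `m.thread` relation pointing at the given thread root, raising 400 (`M_NOT_IN_THREAD`) otherwise and 409 if the store reports a conflicting unsubscription. A simplified sketch of the relation check, with a plain dict standing in for a Matrix event:

```python
from typing import Optional

THREAD_REL_TYPE = "m.thread"

class NotInThreadError(ValueError):
    """Stands in for SynapseError(400, ..., M_NOT_IN_THREAD)."""

def check_in_thread(event: dict, thread_root_event_id: str) -> None:
    # Matrix relations live under content["m.relates_to"].
    relation: Optional[dict] = event.get("content", {}).get("m.relates_to")
    if (
        relation is None
        or relation.get("rel_type") != THREAD_REL_TYPE
        or relation.get("event_id") != thread_root_event_id
    ):
        raise NotInThreadError(
            "Automatic subscription must use an event in the thread"
        )

# A well-formed threaded message passes; anything else raises.
check_in_thread(
    {"content": {"m.relates_to": {"rel_type": "m.thread", "event_id": "$root"}}},
    "$root",
)
```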
View File

@@ -35,7 +35,6 @@ from synapse.api.constants import (
) )
from synapse.api.errors import Codes, SynapseError from synapse.api.errors import Codes, SynapseError
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.databases.main.user_directory import SearchResult
@@ -263,9 +262,9 @@ class UserDirectoryHandler(StateDeltasHandler):
self.pos = max_pos self.pos = max_pos
# Expose current event processing position to prometheus # Expose current event processing position to prometheus
synapse.metrics.event_processing_positions.labels( synapse.metrics.event_processing_positions.labels("user_dir").set(
name="user_dir", **{SERVER_NAME_LABEL: self.server_name} max_pos
).set(max_pos) )
await self.store.update_user_directory_stream_pos(max_pos) await self.store.update_user_directory_stream_pos(max_pos)

View File

@@ -144,31 +144,27 @@ def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]:
# Cast to a list to prevent it changing while the Prometheus # Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics # thread is collecting metrics
with _in_flight_requests_lock: with _in_flight_requests_lock:
request_metrics = list(_in_flight_requests) reqs = list(_in_flight_requests)
for request_metric in request_metrics: for rm in reqs:
request_metric.update_metrics() rm.update_metrics()
# Map from (method, name) -> int, the number of in flight requests of that # Map from (method, name) -> int, the number of in flight requests of that
# type. The key type is Tuple[str, str], but we leave the length unspecified # type. The key type is Tuple[str, str], but we leave the length unspecified
# for compatibility with LaterGauge's annotations. # for compatibility with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], int] = {} counts: Dict[Tuple[str, ...], int] = {}
for request_metric in request_metrics: for rm in reqs:
key = ( key = (rm.method, rm.name)
request_metric.method,
request_metric.name,
request_metric.our_server_name,
)
counts[key] = counts.get(key, 0) + 1 counts[key] = counts.get(key, 0) + 1
return counts return counts
LaterGauge( LaterGauge(
name="synapse_http_server_in_flight_requests_count", "synapse_http_server_in_flight_requests_count",
desc="", "",
labelnames=["method", "servlet", SERVER_NAME_LABEL], ["method", "servlet"],
caller=_get_in_flight_counts, _get_in_flight_counts,
) )
@@ -240,10 +236,9 @@ class RequestMetrics:
response_count.labels(**response_base_labels).inc() response_count.labels(**response_base_labels).inc()
response_timer.labels( response_timer.labels(code=response_code_str, **response_base_labels).observe(
code=response_code_str, time_sec - self.start_ts
**response_base_labels, )
).observe(time_sec - self.start_ts)
resource_usage = context.get_resource_usage() resource_usage = context.get_resource_usage()

View File
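Both versions of `_get_in_flight_counts` share the same shape: snapshot the in-flight set under a lock (the Prometheus collector thread calls this concurrently with request handling), then bucket the snapshot by a key tuple; they differ only in whether the homeserver name is part of the key. A self-contained sketch:

```python
import threading
from typing import Dict, List, Tuple

class RequestMetric:
    def __init__(self, method: str, name: str) -> None:
        self.method = method
        self.name = name

_in_flight_requests: List[RequestMetric] = []
_in_flight_requests_lock = threading.Lock()

def get_in_flight_counts() -> Dict[Tuple[str, ...], int]:
    # Copy under the lock so the set cannot change while we iterate.
    with _in_flight_requests_lock:
        request_metrics = list(_in_flight_requests)

    counts: Dict[Tuple[str, ...], int] = {}
    for rm in request_metrics:
        key = (rm.method, rm.name)
        counts[key] = counts.get(key, 0) + 1
    return counts

_in_flight_requests.extend([RequestMetric("GET", "sync")] * 2)
print(get_in_flight_counts())  # {('GET', 'sync'): 2}
```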

@@ -337,7 +337,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
callback_return = await self._async_render(request) callback_return = await self._async_render(request)
except LimitExceededError as e: except LimitExceededError as e:
if e.pause: if e.pause:
await self._clock.sleep(e.pause) self._clock.sleep(e.pause)
raise raise
if callback_return is not None: if callback_return is not None:

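The one-word difference here matters: `Clock.sleep` returns an awaitable, so calling it without `await` creates the pause but never waits for it, and the rate-limit backoff is silently skipped. A toy asyncio illustration of the same failure mode (Synapse's clock is Twisted-based, but the principle is identical):

```python
import asyncio
import time

async def handler(pause: float, do_await: bool) -> float:
    start = time.monotonic()
    if do_await:
        await asyncio.sleep(pause)  # actually pauses the handler
    else:
        asyncio.sleep(pause)  # coroutine created and dropped: no pause at all
    return time.monotonic() - start

async def main() -> None:
    print(await handler(0.2, do_await=True))   # ~0.2 seconds
    print(await handler(0.2, do_await=False))  # ~0.0, plus a RuntimeWarning

asyncio.run(main())
```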
View File

@@ -33,7 +33,6 @@ from typing import (
Iterable, Iterable,
Mapping, Mapping,
Optional, Optional,
Sequence,
Set, Set,
Tuple, Tuple,
Type, Type,
@@ -156,13 +155,13 @@ class _RegistryProxy:
RegistryProxy = cast(CollectorRegistry, _RegistryProxy) RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
@attr.s(slots=True, hash=True, auto_attribs=True, kw_only=True) @attr.s(slots=True, hash=True, auto_attribs=True)
class LaterGauge(Collector): class LaterGauge(Collector):
"""A Gauge which periodically calls a user-provided callback to produce metrics.""" """A Gauge which periodically calls a user-provided callback to produce metrics."""
name: str name: str
desc: str desc: str
labelnames: Optional[StrSequence] = attr.ib(hash=False) labels: Optional[StrSequence] = attr.ib(hash=False)
# callback: should either return a value (if there are no labels for this metric), # callback: should either return a value (if there are no labels for this metric),
# or dict mapping from a label tuple to a value # or dict mapping from a label tuple to a value
caller: Callable[ caller: Callable[
@@ -170,9 +169,7 @@ class LaterGauge(Collector):
] ]
def collect(self) -> Iterable[Metric]: def collect(self) -> Iterable[Metric]:
# The decision to add `SERVER_NAME_LABEL` is from the `LaterGauge` usage itself g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
# (we don't enforce it here, one level up).
g = GaugeMetricFamily(self.name, self.desc, labels=self.labelnames) # type: ignore[missing-server-name-label]
try: try:
calls = self.caller() calls = self.caller()
@@ -306,9 +303,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
Note: may be called by a separate thread. Note: may be called by a separate thread.
""" """
# The decision to add `SERVER_NAME_LABEL` is from the `InFlightGauge` in_flight = GaugeMetricFamily(
# usage itself (we don't enforce it here, one level up).
in_flight = GaugeMetricFamily( # type: ignore[missing-server-name-label]
self.name + "_total", self.desc, labels=self.labels self.name + "_total", self.desc, labels=self.labels
) )
@@ -332,9 +327,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
yield in_flight yield in_flight
for name in self.sub_metrics: for name in self.sub_metrics:
# The decision to add `SERVER_NAME_LABEL` is from the `InFlightGauge` usage gauge = GaugeMetricFamily(
# itself (we don't enforce it here, one level up).
gauge = GaugeMetricFamily( # type: ignore[missing-server-name-label]
"_".join([self.name, name]), "", labels=self.labels "_".join([self.name, name]), "", labels=self.labels
) )
for key, metrics in metrics_by_key.items(): for key, metrics in metrics_by_key.items():
@@ -350,51 +343,6 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
all_gauges[self.name] = self all_gauges[self.name] = self
class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily):
"""
Custom version of `GaugeHistogramMetricFamily` from `prometheus_client` that allows
specifying labels and label values.
A single gauge histogram and its samples.
For use by custom collectors.
"""
def __init__(
self,
*,
name: str,
documentation: str,
gsum_value: float,
buckets: Optional[Sequence[Tuple[str, float]]] = None,
labelnames: StrSequence = (),
labelvalues: StrSequence = (),
unit: str = "",
):
# Sanity check the number of label values matches the number of label names.
if len(labelvalues) != len(labelnames):
raise ValueError(
"The number of label values must match the number of label names"
)
# Call the super to validate and set the labelnames. We use this stable API
# instead of setting the internal `_labelnames` field directly.
super().__init__(
name=name,
documentation=documentation,
labels=labelnames,
# Since `GaugeHistogramMetricFamily` doesn't support supplying `labels` and
# `buckets` at the same time (artificial limitation), we will just set these
# as `None` and set up the buckets ourselves just below.
buckets=None,
gsum_value=None,
)
# Create a gauge for each bucket.
if buckets is not None:
self.add_metric(labels=labelvalues, buckets=buckets, gsum_value=gsum_value)
class GaugeBucketCollector(Collector): class GaugeBucketCollector(Collector):
"""Like a Histogram, but the buckets are Gauges which are updated atomically. """Like a Histogram, but the buckets are Gauges which are updated atomically.
@@ -407,17 +355,14 @@ class GaugeBucketCollector(Collector):
__slots__ = ( __slots__ = (
"_name", "_name",
"_documentation", "_documentation",
"_labelnames",
"_bucket_bounds", "_bucket_bounds",
"_metric", "_metric",
) )
def __init__( def __init__(
self, self,
*,
name: str, name: str,
documentation: str, documentation: str,
labelnames: Optional[StrSequence],
buckets: Iterable[float], buckets: Iterable[float],
registry: CollectorRegistry = REGISTRY, registry: CollectorRegistry = REGISTRY,
): ):
@@ -431,7 +376,6 @@ class GaugeBucketCollector(Collector):
""" """
self._name = name self._name = name
self._documentation = documentation self._documentation = documentation
self._labelnames = labelnames if labelnames else ()
# the tops of the buckets # the tops of the buckets
self._bucket_bounds = [float(b) for b in buckets] self._bucket_bounds = [float(b) for b in buckets]
@@ -443,7 +387,7 @@ class GaugeBucketCollector(Collector):
# We initially set this to None. We won't report metrics until # We initially set this to None. We won't report metrics until
# this has been initialised after a successful data update # this has been initialised after a successful data update
self._metric: Optional[GaugeHistogramMetricFamilyWithLabels] = None self._metric: Optional[GaugeHistogramMetricFamily] = None
registry.register(self) registry.register(self)
@@ -452,26 +396,15 @@ class GaugeBucketCollector(Collector):
if self._metric is not None: if self._metric is not None:
yield self._metric yield self._metric
def update_data(self, values: Iterable[float], labels: StrSequence = ()) -> None: def update_data(self, values: Iterable[float]) -> None:
"""Update the data to be reported by the metric """Update the data to be reported by the metric
The existing data is cleared, and each measurement in the input is assigned The existing data is cleared, and each measurement in the input is assigned
to the relevant bucket. to the relevant bucket.
"""
self._metric = self._values_to_metric(values)
Args: def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily:
values: the new measurements to assign to buckets
labels: label values to attach to the updated series
"""
self._metric = self._values_to_metric(values, labels)
def _values_to_metric(
self, values: Iterable[float], labels: StrSequence = ()
) -> GaugeHistogramMetricFamilyWithLabels:
"""
Args:
values: the measurements to convert
labels: label values for the resulting series
"""
total = 0.0 total = 0.0
bucket_values = [0 for _ in self._bucket_bounds] bucket_values = [0 for _ in self._bucket_bounds]
@@ -489,13 +422,9 @@ class GaugeBucketCollector(Collector):
# that bucket or below. # that bucket or below.
accumulated_values = itertools.accumulate(bucket_values) accumulated_values = itertools.accumulate(bucket_values)
# The decision to add `SERVER_NAME_LABEL` is from the `GaugeBucketCollector` return GaugeHistogramMetricFamily(
# usage itself (we don't enforce it here, one level up). self._name,
return GaugeHistogramMetricFamilyWithLabels( # type: ignore[missing-server-name-label] self._documentation,
name=self._name,
documentation=self._documentation,
labelnames=self._labelnames,
labelvalues=labels,
buckets=list( buckets=list(
zip((str(b) for b in self._bucket_bounds), accumulated_values) zip((str(b) for b in self._bucket_bounds), accumulated_values)
), ),
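Both versions of `_values_to_metric` do the conversion the same way: count each measurement into its bucket slot, then run `itertools.accumulate` so each bucket reports every sample at or below its bound, which is what a Prometheus gauge histogram expects. A standalone sketch, with the slot found via `bisect` (values above the top bound would raise `IndexError` here):

```python
import itertools
from bisect import bisect_left

def values_to_cumulative_buckets(values, bounds):
    # bounds: sorted upper edges of the buckets.
    bucket_values = [0 for _ in bounds]
    total = 0.0
    for v in values:
        total += v
        bucket_values[bisect_left(bounds, v)] += 1
    # Gauge-histogram buckets are cumulative: each bucket counts every
    # sample at or below its upper bound.
    accumulated = itertools.accumulate(bucket_values)
    return list(zip((str(b) for b in bounds), accumulated)), total

print(values_to_cumulative_buckets([0.3, 0.7, 2.0], [0.5, 1.0, 5.0]))
# ([('0.5', 1), ('1.0', 2), ('5.0', 3)], 3.0)
```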
@@ -527,19 +456,16 @@ class CPUMetrics(Collector):
line = s.read() line = s.read()
raw_stats = line.split(") ", 1)[1].split(" ") raw_stats = line.split(") ", 1)[1].split(" ")
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
user = GaugeMetricFamily("process_cpu_user_seconds_total", "") # type: ignore[missing-server-name-label]
user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec) user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
yield user yield user
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
sys = GaugeMetricFamily("process_cpu_system_seconds_total", "") # type: ignore[missing-server-name-label]
sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec) sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
yield sys yield sys
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. REGISTRY.register(CPUMetrics())
REGISTRY.register(CPUMetrics()) # type: ignore[missing-server-name-label]
# #
@@ -569,40 +495,28 @@ event_processing_loop_room_count = Counter(
# Used to track where various components have processed in the event stream, # Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc. # e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge( event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])
"synapse_event_processing_positions", "", labelnames=["name", SERVER_NAME_LABEL]
)
# Used to track the current max events stream position # Used to track the current max events stream position
event_persisted_position = Gauge( event_persisted_position = Gauge("synapse_event_persisted_position", "")
"synapse_event_persisted_position", "", labelnames=[SERVER_NAME_LABEL]
)
# Used to track the received_ts of the last event processed by various # Used to track the received_ts of the last event processed by various
# components # components
event_processing_last_ts = Gauge( event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])
"synapse_event_processing_last_ts", "", labelnames=["name", SERVER_NAME_LABEL]
)
# Used to track the lag processing events. This is the time difference # Used to track the lag processing events. This is the time difference
# between the last processed event's received_ts and the time it was # between the last processed event's received_ts and the time it was
# finished being processed. # finished being processed.
event_processing_lag = Gauge( event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
"synapse_event_processing_lag", "", labelnames=["name", SERVER_NAME_LABEL]
)
event_processing_lag_by_event = Histogram( event_processing_lag_by_event = Histogram(
"synapse_event_processing_lag_by_event", "synapse_event_processing_lag_by_event",
"Time between an event being persisted and it being queued up to be sent to the relevant remote servers", "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
labelnames=["name", SERVER_NAME_LABEL], ["name"],
) )
# Build info of the running server. # Build info of the running server.
# build_info = Gauge(
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. We
# consider this process-level because all Synapse homeservers running in the process
# will use the same Synapse version.
build_info = Gauge( # type: ignore[missing-server-name-label]
"synapse_build_info", "Build information", ["pythonversion", "version", "osversion"] "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
) )
build_info.labels( build_info.labels(
@@ -618,57 +532,44 @@ threepid_send_requests = Histogram(
" there is a request with try count of 4, then there would have been one" " there is a request with try count of 4, then there would have been one"
" each for 1, 2 and 3", " each for 1, 2 and 3",
buckets=(1, 2, 3, 4, 5, 10), buckets=(1, 2, 3, 4, 5, 10),
labelnames=("type", "reason", SERVER_NAME_LABEL), labelnames=("type", "reason"),
) )
threadpool_total_threads = Gauge( threadpool_total_threads = Gauge(
"synapse_threadpool_total_threads", "synapse_threadpool_total_threads",
"Total number of threads currently in the threadpool", "Total number of threads currently in the threadpool",
labelnames=["name", SERVER_NAME_LABEL], ["name"],
) )
threadpool_total_working_threads = Gauge( threadpool_total_working_threads = Gauge(
"synapse_threadpool_working_threads", "synapse_threadpool_working_threads",
"Number of threads currently working in the threadpool", "Number of threads currently working in the threadpool",
labelnames=["name", SERVER_NAME_LABEL], ["name"],
) )
threadpool_total_min_threads = Gauge( threadpool_total_min_threads = Gauge(
"synapse_threadpool_min_threads", "synapse_threadpool_min_threads",
"Minimum number of threads configured in the threadpool", "Minimum number of threads configured in the threadpool",
labelnames=["name", SERVER_NAME_LABEL], ["name"],
) )
threadpool_total_max_threads = Gauge( threadpool_total_max_threads = Gauge(
"synapse_threadpool_max_threads", "synapse_threadpool_max_threads",
"Maximum number of threads configured in the threadpool", "Maximum number of threads configured in the threadpool",
labelnames=["name", SERVER_NAME_LABEL], ["name"],
) )
def register_threadpool(*, name: str, server_name: str, threadpool: ThreadPool) -> None: def register_threadpool(name: str, threadpool: ThreadPool) -> None:
""" """Add metrics for the threadpool."""
Add metrics for the threadpool.
Args: threadpool_total_min_threads.labels(name).set(threadpool.min)
name: The name of the threadpool, used to identify it in the metrics. threadpool_total_max_threads.labels(name).set(threadpool.max)
server_name: The homeserver name (used to label metrics) (this should be `hs.hostname`).
threadpool: The threadpool to register metrics for.
"""
threadpool_total_min_threads.labels( threadpool_total_threads.labels(name).set_function(lambda: len(threadpool.threads))
name=name, **{SERVER_NAME_LABEL: server_name} threadpool_total_working_threads.labels(name).set_function(
).set(threadpool.min) lambda: len(threadpool.working)
threadpool_total_max_threads.labels( )
name=name, **{SERVER_NAME_LABEL: server_name}
).set(threadpool.max)
threadpool_total_threads.labels(
name=name, **{SERVER_NAME_LABEL: server_name}
).set_function(lambda: len(threadpool.threads))
threadpool_total_working_threads.labels(
name=name, **{SERVER_NAME_LABEL: server_name}
).set_function(lambda: len(threadpool.working))
class MetricsResource(Resource): class MetricsResource(Resource):

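One side of `register_threadpool` takes keyword-only arguments and stamps each series with the homeserver name; both sides rely on `Gauge.set_function`, which re-evaluates its callable at scrape time so the thread counts track the pool without explicit updates. A minimal sketch of the lazy-gauge idea:

```python
from prometheus_client import Gauge

threadpool_total_threads = Gauge(
    "threadpool_total_threads", "Threads currently in the pool", ["name"]
)

class ToyThreadPool:  # stand-in for twisted.python.threadpool.ThreadPool
    def __init__(self) -> None:
        self.threads = ["t1", "t2"]

def register_threadpool(*, name: str, threadpool: ToyThreadPool) -> None:
    # set_function samples the callable on every scrape, so the gauge
    # follows the live pool size with no push-style updates.
    threadpool_total_threads.labels(name).set_function(
        lambda: len(threadpool.threads)
    )

register_threadpool(name="default", threadpool=ToyThreadPool())
```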
View File

@@ -54,9 +54,8 @@ running_on_pypy = platform.python_implementation() == "PyPy"
# Python GC metrics # Python GC metrics
# #
# These are process-level metrics, so they do not have the `SERVER_NAME_LABEL`. gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) # type: ignore[missing-server-name-label] gc_time = Histogram(
gc_time = Histogram( # type: ignore[missing-server-name-label]
"python_gc_time", "python_gc_time",
"Time taken to GC (sec)", "Time taken to GC (sec)",
["gen"], ["gen"],
@@ -83,8 +82,7 @@ gc_time = Histogram( # type: ignore[missing-server-name-label]
class GCCounts(Collector): class GCCounts(Collector):
def collect(self) -> Iterable[Metric]: def collect(self) -> Iterable[Metric]:
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"]) # type: ignore[missing-server-name-label]
for n, m in enumerate(gc.get_count()): for n, m in enumerate(gc.get_count()):
cm.add_metric([str(n)], m) cm.add_metric([str(n)], m)
@@ -103,8 +101,7 @@ def install_gc_manager() -> None:
if running_on_pypy: if running_on_pypy:
return return
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. REGISTRY.register(GCCounts())
REGISTRY.register(GCCounts()) # type: ignore[missing-server-name-label]
gc.disable() gc.disable()
@@ -179,8 +176,7 @@ class PyPyGCStats(Collector):
# #
# Total time spent in GC: 0.073 # s.total_gc_time # Total time spent in GC: 0.073 # s.total_gc_time
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. pypy_gc_time = CounterMetricFamily(
pypy_gc_time = CounterMetricFamily( # type: ignore[missing-server-name-label]
"pypy_gc_time_seconds_total", "pypy_gc_time_seconds_total",
"Total time spent in PyPy GC", "Total time spent in PyPy GC",
labels=[], labels=[],
@@ -188,8 +184,7 @@ class PyPyGCStats(Collector):
pypy_gc_time.add_metric([], s.total_gc_time / 1000) pypy_gc_time.add_metric([], s.total_gc_time / 1000)
yield pypy_gc_time yield pypy_gc_time
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. pypy_mem = GaugeMetricFamily(
pypy_mem = GaugeMetricFamily( # type: ignore[missing-server-name-label]
"pypy_memory_bytes", "pypy_memory_bytes",
"Memory tracked by PyPy allocator", "Memory tracked by PyPy allocator",
labels=["state", "class", "kind"], labels=["state", "class", "kind"],
@@ -213,5 +208,4 @@ class PyPyGCStats(Collector):
if running_on_pypy: if running_on_pypy:
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. REGISTRY.register(PyPyGCStats())
REGISTRY.register(PyPyGCStats()) # type: ignore[missing-server-name-label]

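`GCCounts` is a pull-style collector: instead of updating a gauge when something happens, it builds a fresh `GaugeMetricFamily` from `gc.get_count()` each time the registry is scraped. A runnable sketch of that collector shape (the class and metric names mirror the hunk; only the registration wiring is simplified):

```python
import gc
from typing import Iterable

from prometheus_client import REGISTRY
from prometheus_client.core import GaugeMetricFamily, Metric
from prometheus_client.registry import Collector

class GCCounts(Collector):
    def collect(self) -> Iterable[Metric]:
        # One labelled sample per GC generation, rebuilt at scrape time.
        cm = GaugeMetricFamily("python_gc_counts", "GC object counts",
                               labels=["gen"])
        for gen, count in enumerate(gc.get_count()):
            cm.add_metric([str(gen)], count)
        yield cm

REGISTRY.register(GCCounts())
```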
View File

@@ -62,8 +62,7 @@ logger = logging.getLogger(__name__)
# Twisted reactor metrics # Twisted reactor metrics
# #
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. tick_time = Histogram(
tick_time = Histogram( # type: ignore[missing-server-name-label]
"python_twisted_reactor_tick_time", "python_twisted_reactor_tick_time",
"Tick time of the Twisted reactor (sec)", "Tick time of the Twisted reactor (sec)",
buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5], buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
@@ -115,8 +114,7 @@ class ReactorLastSeenMetric(Collector):
self._call_wrapper = call_wrapper self._call_wrapper = call_wrapper
def collect(self) -> Iterable[Metric]: def collect(self) -> Iterable[Metric]:
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. cm = GaugeMetricFamily(
cm = GaugeMetricFamily( # type: ignore[missing-server-name-label]
"python_twisted_reactor_last_seen", "python_twisted_reactor_last_seen",
"Seconds since the Twisted reactor was last seen", "Seconds since the Twisted reactor was last seen",
) )
@@ -167,5 +165,4 @@ except Exception as e:
if wrapper: if wrapper:
# This is a process-level metric, so it does not have the `SERVER_NAME_LABEL`. REGISTRY.register(ReactorLastSeenMetric(wrapper))
REGISTRY.register(ReactorLastSeenMetric(wrapper)) # type: ignore[missing-server-name-label]

Some files were not shown because too many files have changed in this diff