Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)

Compare commits: v1.136.0rc...madlittlem (102 commits)
Commit SHA1s (author and date columns not captured by the mirror):

6ce2f3e59d, 93044f4c5b, c7a80b63ec, 1f384b0e21, b2997a8f20, bff4a11b3f, 09a489e198, 537e14169e,
3e66e0a1b8, 0c8759bbb6, 4303879cfe, 3742b3b3fb, 224cb3f827, 68068de3a4, 356cc4a0a1, 27fc3389f3,
df2cfb3932, c339021ce8, 499f947c67, e76a9af4d7, eec1ca6e93, 56b5759c0f, 767177ca5a, 5b8e6e7911,
6a6be6fbe2, 21c7841228, 5b55e3f15d, 0e2b92bcbc, 481987eb83, 5fd30c7ea7, d527c794fb, 19fe3f001e,
f8a44638eb, 7ec5e60671, 48184eefa3, 205d9e4fc4, 40edb10a98, 3d7e39b2ea, c51da9bac0, 4cee8c7b99,
4ac656073d, 3212526673, c0878ac9e6, 76c9f09e09, 5c20a60f0b, 3671bdbc51, 7e60ca70c8, 9135d78b88,
3e10b3392f, 40e4e379da, 87ba085cdf, 7e3e9a6d60, 874c6b38f7, 09aa3fc270, 14e93d8043, 6d39e3a411,
f7aa36926e, 283ade8e33, 1f155c9650, 6679c719e3, 073ce74464, a93ec56cec, e8c6cb3d9e, 3bb95d4a9d,
526b875e03, d27438bc25, 8f375ea6c1, 3db9fa3eeb, 0c0a9fafde, 4054d956f7, 04932c76f5, 9244948750,
fdd63882b1, 1e45f35eb6, 9301baa5f8, 576022912b, 848949a727, 3f37bd6277, a89afc733b, f0656a3b06,
2c434e5187, 9f579b36c8, a407357eec, 92b0077b27, 7e8782f47f, 8fe3c73f95, 81f815ee33, 3108fa32d3,
a1a40523ae, e65a6fc58a, bd8f12f9c6, 0eb7252a23, 15146c2259, 340e4de5af, 88a24bdd13, 7aac7db652,
a8886d3351, da23e8acde, 2f3a075514, 87d80b0f9a, 731e81c9a3, 6dd6bb4714
@@ -61,7 +61,7 @@ poetry run update_synapse_database --database-config .ci/postgres-config-unporte
 echo "+++ Comparing ported schema with unported schema"
 # Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
 psql synapse -c "DROP TABLE port_from_sqlite3;"
-pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
-pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner --restrict-key=TESTING synapse_unported > unported.sql
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner --restrict-key=TESTING synapse > ported.sql
 # By default, `diff` returns zero if there are no changes and nonzero otherwise
 diff -u unported.sql ported.sql | tee schema_diff
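The final step leans on GNU `diff`'s exit-code convention (0 when the dumps match, 1 when they differ, 2 or more on error), so the CI job fails exactly when the ported and unported schemas diverge. A minimal Python sketch of the same check, reusing the file names from the script (the wrapper itself is illustrative, not part of the CI):

```python
import subprocess
import sys

# Compare the two schema dumps, relying on GNU diff's exit codes:
# 0 = identical, 1 = differences found, >1 = error running diff.
result = subprocess.run(
    ["diff", "-u", "unported.sql", "ported.sql"],
    capture_output=True,
    text=True,
)
if result.returncode == 0:
    print("Schemas match")
elif result.returncode == 1:
    print(result.stdout)  # the unified diff, like `tee schema_diff`
    sys.exit("Ported schema differs from unported schema")
else:
    sys.exit(f"diff failed: {result.stderr}")
```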
14 .github/workflows/docker.yml vendored
@@ -31,7 +31,7 @@ jobs:
 uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Extract version from pyproject.toml
 # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -41,13 +41,13 @@ jobs:
 echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV

 - name: Log in to DockerHub
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
 with:
 username: ${{ secrets.DOCKERHUB_USERNAME }}
 password: ${{ secrets.DOCKERHUB_TOKEN }}

 - name: Log in to GHCR
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}
@@ -95,21 +95,21 @@ jobs:
 - build
 steps:
 - name: Download digests
-uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
 with:
 path: ${{ runner.temp }}/digests
 pattern: digests-*
 merge-multiple: true

 - name: Log in to DockerHub
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
 if: ${{ startsWith(matrix.repository, 'docker.io') }}
 with:
 username: ${{ secrets.DOCKERHUB_USERNAME }}
 password: ${{ secrets.DOCKERHUB_TOKEN }}

 - name: Log in to GHCR
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
 if: ${{ startsWith(matrix.repository, 'ghcr.io') }}
 with:
 registry: ghcr.io
@@ -123,7 +123,7 @@ jobs:
 uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2

 - name: Calculate docker image tag
-uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
+uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
 with:
 images: ${{ matrix.repository }}
 flavor: |
4 .github/workflows/docs-pr.yaml vendored
@@ -13,7 +13,7 @@ jobs:
 name: GitHub Pages
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 # Fetch all history so that the schema_versions script works.
 fetch-depth: 0
@@ -50,7 +50,7 @@ jobs:
 name: Check links in documentation
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Setup mdbook
 uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
2 .github/workflows/docs.yaml vendored
@@ -50,7 +50,7 @@ jobs:
 needs:
 - pre
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 # Fetch all history so that the schema_versions script works.
 fetch-depth: 0
4 .github/workflows/fix_lint.yaml vendored
@@ -18,10 +18,10 @@ jobs:
 steps:
 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 components: clippy, rustfmt
16 .github/workflows/latest_deps.yml vendored
@@ -42,9 +42,9 @@ jobs:
 if: needs.check_repo.outputs.should_run_workflow == 'true'
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -77,10 +77,10 @@ jobs:
 postgres-version: "14"

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -152,10 +152,10 @@ jobs:
 BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -202,7 +202,7 @@ jobs:

 steps:
 - name: Check out synapse codebase
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 path: synapse
@@ -234,7 +234,7 @@ jobs:
 runs-on: ubuntu-latest

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
2 .github/workflows/poetry_lockfile.yaml vendored
@@ -16,7 +16,7 @@ jobs:
 name: "Check locked dependencies have sdists"
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: '3.x'
10 .github/workflows/push_complement_image.yml vendored
@@ -33,29 +33,29 @@ jobs:
 packages: write
 steps:
 - name: Checkout specific branch (debug build)
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 if: github.event_name == 'workflow_dispatch'
 with:
 ref: ${{ inputs.branch }}
 - name: Checkout clean copy of develop (scheduled build)
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 if: github.event_name == 'schedule'
 with:
 ref: develop
 - name: Checkout clean copy of master (on-push)
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 if: github.event_name == 'push'
 with:
 ref: master
 - name: Login to registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
 with:
 registry: ghcr.io
 username: ${{ github.actor }}
 password: ${{ secrets.GITHUB_TOKEN }}
 - name: Work out labels for complement image
 id: meta
-uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
+uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
 with:
 images: ghcr.io/${{ github.repository }}/complement-synapse
 tags: |
12 .github/workflows/release-artifacts.yml vendored
@@ -27,7 +27,7 @@ jobs:
 name: "Calculate list of debian distros"
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -55,7 +55,7 @@ jobs:

 steps:
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 path: src
@@ -66,7 +66,7 @@ jobs:
 install: true

 - name: Set up docker layer caching
-uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: /tmp/.buildx-cache
 key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -132,7 +132,7 @@ jobs:
 os: "ubuntu-24.04-arm"

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
@@ -165,7 +165,7 @@ jobs:
 if: ${{ !startsWith(github.ref, 'refs/pull/') }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.10"
@@ -191,7 +191,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Download all workflow run artifacts
-uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
 - name: Build a tarball for the debs
 # We need to merge all the debs uploads into one folder, then compress
 # that.
4 .github/workflows/schema.yaml vendored
@@ -14,7 +14,7 @@ jobs:
 name: Ensure Synapse config schema is valid
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -40,7 +40,7 @@ jobs:
 name: Ensure generated documentation is up-to-date
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
74 .github/workflows/tests.yml vendored
@@ -86,9 +86,9 @@ jobs:
 if: ${{ needs.changes.outputs.linting == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -106,7 +106,7 @@ jobs:
 if: ${{ needs.changes.outputs.linting == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -116,7 +116,7 @@ jobs:
 check-lockfile:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -129,7 +129,7 @@ jobs:

 steps:
 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Setup Poetry
 uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,10 +151,10 @@ jobs:

 steps:
 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -174,7 +174,7 @@ jobs:
 # Cribbed from
 # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
 - name: Restore/persist mypy's cache
-uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: |
 .mypy_cache
@@ -187,7 +187,7 @@ jobs:
 lint-crlf:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - name: Check line endings
 run: scripts-dev/check_line_terminators.sh
@@ -195,7 +195,7 @@ jobs:
 if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 ref: ${{ github.event.pull_request.head.sha }}
 fetch-depth: 0
@@ -213,11 +213,11 @@ jobs:
 if: ${{ needs.changes.outputs.linting == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 ref: ${{ github.event.pull_request.head.sha }}
 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -233,10 +233,10 @@ jobs:
 if: ${{ needs.changes.outputs.rust == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 components: clippy
 toolchain: ${{ env.RUST_VERSION }}
@@ -252,10 +252,10 @@ jobs:
 if: ${{ needs.changes.outputs.rust == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: nightly-2025-04-23
 components: clippy
@@ -270,10 +270,10 @@ jobs:

 steps:
 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -306,10 +306,10 @@ jobs:
 if: ${{ needs.changes.outputs.rust == 'true' }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 # We use nightly so that we can use some unstable options that we use in
 # `.rustfmt.toml`.
@@ -326,7 +326,7 @@ jobs:
 needs: changes
 if: ${{ needs.changes.outputs.linting_readme == 'true' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -376,7 +376,7 @@ jobs:
 needs: linting-done
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
 with:
 python-version: "3.x"
@@ -397,7 +397,7 @@ jobs:
 job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - run: sudo apt-get -qq install xmlsec1
 - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
 if: ${{ matrix.job.postgres-version }}
@@ -412,7 +412,7 @@ jobs:
 postgres:${{ matrix.job.postgres-version }}

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -453,10 +453,10 @@ jobs:
 - changes
 runs-on: ubuntu-22.04
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -518,7 +518,7 @@ jobs:
 extras: ["all"]

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 # Install libs necessary for PyPy to build binary wheels for dependencies
 - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -568,12 +568,12 @@ jobs:
 job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - name: Prepare test blacklist
 run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -615,7 +615,7 @@ jobs:
 --health-retries 5

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - run: sudo apt-get -qq install xmlsec1 postgresql-client
 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
 with:
@@ -659,7 +659,7 @@ jobs:
 --health-retries 5

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - name: Add PostgreSQL apt repository
 # We need a version of pg_dump that can handle the version of
 # PostgreSQL being tested against. The Ubuntu package repository lags
@@ -714,12 +714,12 @@ jobs:

 steps:
 - name: Checkout synapse codebase
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 path: synapse

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -750,10 +750,10 @@ jobs:
 - changes

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -770,10 +770,10 @@ jobs:
 - changes

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: nightly-2022-12-01
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
2 .github/workflows/triage_labelled.yml vendored
@@ -11,7 +11,7 @@ jobs:
 if: >
 contains(github.event.issue.labels.*.name, 'X-Needs-Info')
 steps:
-- uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
+- uses: actions/add-to-project@4515659e2b458b27365e167605ac44f219494b66 # v1.0.2
 id: add_project
 with:
 project-url: "https://github.com/orgs/matrix-org/projects/67"
16 .github/workflows/twisted_trunk.yml vendored
@@ -43,10 +43,10 @@ jobs:
 runs-on: ubuntu-latest

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -70,11 +70,11 @@ jobs:
 runs-on: ubuntu-latest

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - run: sudo apt-get -qq install xmlsec1

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -117,10 +117,10 @@ jobs:
 - ${{ github.workspace }}:/src

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

 - name: Install Rust
-uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # master
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
 with:
 toolchain: ${{ env.RUST_VERSION }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
@@ -175,7 +175,7 @@ jobs:

 steps:
 - name: Run actions/checkout@v4 for synapse
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 with:
 path: synapse
@@ -217,7 +217,7 @@ jobs:
 runs-on: ubuntu-latest

 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
128 CHANGES.md
@@ -1,3 +1,131 @@
# Synapse 1.138.0rc1 (2025-09-02)

### Features

- Support for the stable endpoint and scopes of [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) & co. ([\#18549](https://github.com/element-hq/synapse/issues/18549))

### Bugfixes

- Improve database performance of [MSC4293](https://github.com/matrix-org/matrix-spec-proposals/pull/4293) - Redact on Kick/Ban. ([\#18851](https://github.com/element-hq/synapse/issues/18851))
- Do not throw an error when fetching a rejected delayed state event on startup. ([\#18858](https://github.com/element-hq/synapse/issues/18858))

### Improved Documentation

- Fix worker documentation incorrectly indicating all room Admin API requests were capable of being handled by workers. ([\#18853](https://github.com/element-hq/synapse/issues/18853))

### Internal Changes

- Instrument `_ByteProducer` with tracing to measure potential dead time while writing bytes to the request. ([\#18804](https://github.com/element-hq/synapse/issues/18804))
- Switch to OpenTracing's `ContextVarsScopeManager` instead of our own custom `LogContextScopeManager`. ([\#18849](https://github.com/element-hq/synapse/issues/18849))
- Trace how much work is being done while "recursively fetching redactions". ([\#18854](https://github.com/element-hq/synapse/issues/18854))
- Link [upstream Twisted bug](https://github.com/twisted/twisted/issues/12498) tracking the problem that explains why we have to use a `Producer` to write bytes to the request. ([\#18855](https://github.com/element-hq/synapse/issues/18855))
- Introduce `EventPersistencePair` type. ([\#18857](https://github.com/element-hq/synapse/issues/18857))

### Updates to locked dependencies

* Bump actions/add-to-project from c0c5949b017d0d4a39f7ba888255881bdac2a823 to 4515659e2b458b27365e167605ac44f219494b66. ([\#18863](https://github.com/element-hq/synapse/issues/18863))
* Bump actions/checkout from 4.3.0 to 5.0.0. ([\#18834](https://github.com/element-hq/synapse/issues/18834))
* Bump anyhow from 1.0.98 to 1.0.99. ([\#18841](https://github.com/element-hq/synapse/issues/18841))
* Bump docker/login-action from 3.4.0 to 3.5.0. ([\#18835](https://github.com/element-hq/synapse/issues/18835))
* Bump dtolnay/rust-toolchain from b3b07ba8b418998c39fb20f53e8b695cdcc8de1b to e97e2d8cc328f1b50210efc529dca0028893a2d9. ([\#18862](https://github.com/element-hq/synapse/issues/18862))
* Bump phonenumbers from 9.0.11 to 9.0.12. ([\#18837](https://github.com/element-hq/synapse/issues/18837))
* Bump regex from 1.11.1 to 1.11.2. ([\#18864](https://github.com/element-hq/synapse/issues/18864))
* Bump reqwest from 0.12.22 to 0.12.23. ([\#18842](https://github.com/element-hq/synapse/issues/18842))
* Bump ruff from 0.12.7 to 0.12.10. ([\#18865](https://github.com/element-hq/synapse/issues/18865))
* Bump serde_json from 1.0.142 to 1.0.143. ([\#18866](https://github.com/element-hq/synapse/issues/18866))
* Bump types-bleach from 6.2.0.20250514 to 6.2.0.20250809. ([\#18838](https://github.com/element-hq/synapse/issues/18838))
* Bump types-jsonschema from 4.25.0.20250720 to 4.25.1.20250822. ([\#18867](https://github.com/element-hq/synapse/issues/18867))
* Bump types-psycopg2 from 2.9.21.20250718 to 2.9.21.20250809. ([\#18836](https://github.com/element-hq/synapse/issues/18836))

# Synapse 1.137.0 (2025-08-26)

No significant changes since 1.137.0rc1.

# Synapse 1.137.0rc1 (2025-08-19)

### Bugfixes

- Fix a bug which could corrupt auth chains, making it impossible to perform state resolution. ([\#18746](https://github.com/element-hq/synapse/issues/18746))
- Fix error message in `register_new_matrix_user` utility script for empty `registration_shared_secret`. ([\#18780](https://github.com/element-hq/synapse/issues/18780))
- Allow enabling [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) when the stable Matrix Authentication Service integration is enabled. ([\#18832](https://github.com/element-hq/synapse/issues/18832))

### Improved Documentation

- Include IPv6 networks in `denied-peer-ips` of coturn setup. Contributed by @litetex. ([\#18781](https://github.com/element-hq/synapse/issues/18781))

### Internal Changes

- Update tests to ensure all database tables are emptied when purging a room. ([\#18794](https://github.com/element-hq/synapse/issues/18794))
- Instrument the `encode_response` part of Sliding Sync requests for more complete traces in Jaeger. ([\#18815](https://github.com/element-hq/synapse/issues/18815))
- Tag Sliding Sync traces when we `wait_for_events`. ([\#18816](https://github.com/element-hq/synapse/issues/18816))
- Fix `portdb` CI by hardcoding the new `pg_dump` restrict key that was added due to [CVE-2025-8714](https://nvd.nist.gov/vuln/detail/cve-2025-8714). ([\#18824](https://github.com/element-hq/synapse/issues/18824))

### Updates to locked dependencies

* Bump actions/add-to-project from 5b1a254a3546aef88e0a7724a77a623fa2e47c36 to 0c37450c4be3b6a7582b2fb013c9ebfd9c8e9300. ([\#18557](https://github.com/element-hq/synapse/issues/18557))
* Bump actions/cache from 4.2.3 to 4.2.4. ([\#18799](https://github.com/element-hq/synapse/issues/18799))
* Bump actions/checkout from 4.2.2 to 4.3.0. ([\#18800](https://github.com/element-hq/synapse/issues/18800))
* Bump actions/download-artifact from 4.3.0 to 5.0.0. ([\#18801](https://github.com/element-hq/synapse/issues/18801))
* Bump docker/metadata-action from 5.7.0 to 5.8.0. ([\#18773](https://github.com/element-hq/synapse/issues/18773))
* Bump mypy from 1.16.1 to 1.17.1. ([\#18775](https://github.com/element-hq/synapse/issues/18775))
* Bump phonenumbers from 9.0.10 to 9.0.11. ([\#18797](https://github.com/element-hq/synapse/issues/18797))
* Bump pygithub from 2.6.1 to 2.7.0. ([\#18779](https://github.com/element-hq/synapse/issues/18779))
* Bump serde_json from 1.0.141 to 1.0.142. ([\#18776](https://github.com/element-hq/synapse/issues/18776))
* Bump slab from 0.4.10 to 0.4.11. ([\#18809](https://github.com/element-hq/synapse/issues/18809))
* Bump tokio from 1.47.0 to 1.47.1. ([\#18774](https://github.com/element-hq/synapse/issues/18774))
* Bump types-pyyaml from 6.0.12.20250516 to 6.0.12.20250809. ([\#18798](https://github.com/element-hq/synapse/issues/18798))
* Bump types-setuptools from 80.9.0.20250529 to 80.9.0.20250809. ([\#18796](https://github.com/element-hq/synapse/issues/18796))

# Synapse 1.136.0 (2025-08-12)

Note: This release includes the security fixes from `1.135.2` and `1.136.0rc2`, detailed below.

### Bugfixes

- Fix bug introduced in 1.135.2 and 1.136.0rc2 where the [Make Room Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#make-room-admin-api) would not treat a room v12's creator power level as the highest in the room. ([\#18805](https://github.com/element-hq/synapse/issues/18805))

# Synapse 1.135.2 (2025-08-11)

This is the Synapse portion of the [Matrix coordinated security release](https://matrix.org/blog/2025/07/security-predisclosure/). This release includes support for [room version](https://spec.matrix.org/v1.15/rooms/) 12, which fixes a number of security vulnerabilities, including [CVE-2025-49090](https://www.cve.org/CVERecord?id=CVE-2025-49090).

The default room version is not changed. Not all clients will support room version 12 immediately, and not all users will be using the latest version of their clients. Large, public rooms are advised to wait a few weeks before upgrading to room version 12 to allow users throughout the Matrix ecosystem to update their clients.

Note: release 1.135.1 was skipped due to issues discovered during the release process.

Two patched Synapse releases are now available:

* `1.135.2`: stable release comprised of `1.135.0` + security patches
  * Upgrade to this release **if you are currently running 1.135.0 or below**.
* `1.136.0rc2`: unstable release candidate comprised of `1.136.0rc1` + security patches.
  * Upgrade to this release **only if you are on 1.136.0rc1**.

### Bugfixes

- Fix invalidation of storage cache that was broken in 1.135.0. ([\#18786](https://github.com/element-hq/synapse/issues/18786))

### Internal Changes

- Add a parameter to `upgrade_rooms(..)` to allow auto-joining local users. ([\#82](https://github.com/element-hq/synapse/issues/82))
- Speed up upgrading a room with large numbers of banned users. ([\#18574](https://github.com/element-hq/synapse/issues/18574))

# Synapse 1.136.0rc2 (2025-08-11)

- Update MSC4293 redaction logic for room v12. ([\#80](https://github.com/element-hq/synapse/issues/80))

### Internal Changes

- Add a parameter to `upgrade_rooms(..)` to allow auto-joining local users. ([\#83](https://github.com/element-hq/synapse/issues/83))

# Synapse 1.136.0rc1 (2025-08-05)

Please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11360), as this release contains changes to MAS support, metrics labels and the module API which may require your attention when upgrading.
24 Cargo.lock generated
@@ -28,9 +28,9 @@ dependencies = [
 [[package]]
 name = "anyhow"
-version = "1.0.98"
+version = "1.0.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"

 [[package]]
 name = "arc-swap"
@@ -1062,9 +1062,9 @@ dependencies = [
 [[package]]
 name = "regex"
-version = "1.11.1"
+version = "1.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
 dependencies = [
 "aho-corasick",
 "memchr",
@@ -1091,9 +1091,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 [[package]]
 name = "reqwest"
-version = "0.12.22"
+version = "0.12.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
+checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
 dependencies = [
 "base64",
 "bytes",
@@ -1270,9 +1270,9 @@ dependencies = [
 [[package]]
 name = "serde_json"
-version = "1.0.141"
+version = "1.0.143"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
+checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
 dependencies = [
 "itoa",
 "memchr",
@@ -1322,9 +1322,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
 [[package]]
 name = "slab"
-version = "0.4.10"
+version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
+checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"

 [[package]]
 name = "smallvec"
@@ -1478,9 +1478,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 [[package]]
 name = "tokio"
-version = "1.47.0"
+version = "1.47.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
+checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
 dependencies = [
 "backtrace",
 "bytes",
1 changelog.d/18791.misc (Normal file)

@@ -0,0 +1 @@
+Fix `LaterGauge` metrics to collect from all servers.
1 changelog.d/18871.misc (Normal file)

@@ -0,0 +1 @@
+Store the `LoggingContext` in a `ContextVar` instead of a thread-local variable.
1 changelog.d/18878.docker (Normal file)

@@ -0,0 +1 @@
+Suppress the bulk of the "Applying schema" log noise when `SYNAPSE_LOG_TESTING` is set.
42 debian/changelog vendored
@@ -1,9 +1,51 @@
matrix-synapse-py3 (1.138.0~rc1) stable; urgency=medium

  * New synapse release 1.138.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Sep 2025 12:16:14 +0000

matrix-synapse-py3 (1.137.0) stable; urgency=medium

  * New Synapse release 1.137.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Aug 2025 10:23:41 +0100

matrix-synapse-py3 (1.137.0~rc1) stable; urgency=medium

  * New Synapse release 1.137.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Aug 2025 10:55:22 +0100

matrix-synapse-py3 (1.136.0) stable; urgency=medium

  * New Synapse release 1.136.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Aug 2025 13:18:03 +0100

matrix-synapse-py3 (1.136.0~rc2) stable; urgency=medium

  * New Synapse release 1.136.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 11 Aug 2025 12:18:52 -0600

matrix-synapse-py3 (1.136.0~rc1) stable; urgency=medium

  * New Synapse release 1.136.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Aug 2025 08:13:30 -0600

matrix-synapse-py3 (1.135.2) stable; urgency=medium

  * New Synapse release 1.135.2.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 11 Aug 2025 11:52:01 -0600

matrix-synapse-py3 (1.135.1) stable; urgency=medium

  * New Synapse release 1.135.1.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 11 Aug 2025 11:13:15 -0600

matrix-synapse-py3 (1.135.0) stable; urgency=medium

  * New Synapse release 1.135.0.
@@ -77,6 +77,13 @@ loggers:
 #}
 synapse.visibility.filtered_event_debug:
 level: DEBUG

+{#
+If Synapse is under test, we don't care about seeing the "Applying schema" log
+lines at the INFO level every time we run the tests (it's 100 lines of bulk)
+#}
+synapse.storage.prepare_database:
+level: WARN
 {% endif %}

 root:
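The added template block simply raises the `synapse.storage.prepare_database` logger above INFO while under test. For reference, a rough standard-library equivalent of what the rendered config does (the logger name and environment variable come from the template above; the snippet itself is only an illustrative sketch):

```python
import logging
import os

# Mirror of the template logic: when Synapse is under test, raise the
# schema-preparation logger to WARN so "Applying schema" INFO lines are hidden.
if os.environ.get("SYNAPSE_LOG_TESTING"):
    logging.getLogger("synapse.storage.prepare_database").setLevel(logging.WARN)
```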
@@ -1,12 +1,12 @@
 # Log Contexts

 To help track the processing of individual requests, synapse uses a
-'`log context`' to track which request it is handling at any given
-moment. This is done via a thread-local variable; a `logging.Filter` is
-then used to fish the information back out of the thread-local variable
+`LoggingContext` to track which request it is handling at any given
+moment. This is done via a `ContextVar` variable; a `logging.Filter` is
+then used to fish the information back out of the `ContextVar` variable
 and add it to each log record.

-Logcontexts are also used for CPU and database accounting, so that we
+Log contexts are also used for CPU and database accounting, so that we
 can track which requests were responsible for high CPU use or database
 activity.
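The `ContextVar`-plus-`logging.Filter` mechanism the new text describes can be sketched with the standard library alone. Everything below (`request_var`, `RequestFilter`) is an illustrative stand-in, not Synapse's actual implementation:

```python
import logging
from contextvars import ContextVar

# A ContextVar holding the identifier of the request currently being handled.
request_var: ContextVar[str] = ContextVar("request", default="sentinel")

class RequestFilter(logging.Filter):
    """Fishes the current request out of the ContextVar and attaches it
    to every log record, so the formatter can include it."""
    def filter(self, record: logging.LogRecord) -> bool:
        record.request = request_var.get()
        return True

handler = logging.StreamHandler()
handler.addFilter(RequestFilter())
handler.setFormatter(logging.Formatter("%(asctime)s [%(request)s] %(message)s"))
logging.basicConfig(level=logging.DEBUG, handlers=[handler])

logger = logging.getLogger(__name__)
token = request_var.set("GET-42")
logger.debug("handling request")  # logged with [GET-42]
request_var.reset(token)
```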
@@ -14,18 +14,11 @@ The `synapse.logging.context` module provides facilities for managing
 the current log context (as well as providing the `LoggingContextFilter`
 class).

 Asynchronous functions make the whole thing complicated, so this document describes
 how it all works, and how to write code which follows the rules.

-In this document, "awaitable" refers to any object which can be `await`ed. In the context of
-Synapse, that normally means either a coroutine or a Twisted
+In this document, "awaitable" refers to any object which can be `await`ed. In the
+context of Synapse, that normally means either a coroutine or a Twisted
 [`Deferred`](https://twistedmatrix.com/documents/current/api/twisted.internet.defer.Deferred.html).

-## Logcontexts without asynchronous code
-
-In the absence of any asynchronous voodoo, things are simple enough. As with
-any code of this nature, the rule is that our function should leave
-things as it found them:
+## Basic usage

 ```python
 from synapse.logging import context  # omitted from future snippets
@@ -45,7 +38,7 @@ def do_request_handling():
 logger.debug("phew")  # this will be logged against request_id
 ```

-LoggingContext implements the context management methods, so the above
+`LoggingContext` implements the context management methods, so the above
 can be written much more succinctly as:

 ```python
@@ -59,197 +52,76 @@ def do_request_handling():
 logger.debug("phew")
 ```

-## Using logcontexts with awaitables
-
-Awaitables break the linear flow of code so that there is no longer a single entry point
-where we should set the logcontext and a single exit point where we should remove it.
-
-Consider the example above, where `do_request_handling` needs to do some
-blocking operation, and returns an awaitable:
+### The `sentinel` context
+
+The default context is `context.SENTINEL_CONTEXT`, which is a sentinel value to
+represent the root context. This is what is used when there is no other context set.
+
+No CPU/database usage metrics are recorded against the `sentinel` context.
+
+Ideally, nothing from the Synapse homeserver would be logged against the `sentinel`
+context, as we want to know where the logs came from. In practice, this is not always
+the case yet, especially outside of request handling.
+
+Previously, the `sentinel` context played a bigger role when we had to deal carefully
+with thread-local storage: we had to make sure not to leak another context into another
+task after we gave up control to the reactor, so we set the
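A short sketch of how the sentinel behaves in practice. It assumes `current_context()` is the module's accessor for the active context; only `SENTINEL_CONTEXT`, `LoggingContext`, and `set_current_context` are named in this document, so treat the accessor as a hedged assumption:

```python
from synapse.logging import context

# Outside of any request handling, the active context is the sentinel...
assert context.current_context() is context.SENTINEL_CONTEXT

with context.LoggingContext("request-1"):
    ...  # CPU/database usage in here is accounted to "request-1"

# ...and it is restored once the block exits, so nothing afterwards is
# accounted against (or logged under) "request-1".
assert context.current_context() is context.SENTINEL_CONTEXT
```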
+### `PreserveLoggingContext`
+
+In a similar vein (it is no longer as relevant as it once was), `PreserveLoggingContext`
+is another context-manager helper and a little bit of syntactic sugar to set the current
+log context (without finishing it) and restore the previous context on exit.

 ```python
-async def handle_request(request_id):
-    with context.LoggingContext() as request_context:
-        request_context.request = request_id
-        await do_request_handling()
+import logging
+from synapse.logging.context import LoggingContext
+
+logger = logging.getLogger(__name__)
+
+def main() -> None:
+    with LoggingContext("main"):
+        task_context = LoggingContext("task")
+
+        with task_context:
+            logger.debug("foo")
+
+        # Bad: will throw an error because `task_context` is already finished
+        with task_context:
+            logger.debug("bar")
+
+        logger.debug("finished")
 ```
-In the above flow:
-
-- The logcontext is set
-- `do_request_handling` is called, and returns an awaitable
-- `handle_request` awaits the awaitable
-- Execution of `handle_request` is suspended
-
-So we have stopped processing the request (and will probably go on to
-start processing the next), without clearing the logcontext.
-
-To circumvent this problem, synapse code assumes that, wherever you have
-an awaitable, you will want to `await` it. To that end, wherever
-functions return awaitables, we adopt the following conventions:
-
-**Rules for functions returning awaitables:**
-
-> - If the awaitable is already complete, the function returns with the
->   same logcontext it started with.
-> - If the awaitable is incomplete, the function clears the logcontext
->   before returning; when the awaitable completes, it restores the
->   logcontext before running any callbacks.
-
-That sounds complicated, but actually it means a lot of code (including
-the example above) "just works". There are two cases:
-
-- If `do_request_handling` returns a completed awaitable, then the
-  logcontext will still be in place. In this case, execution will
-  continue immediately after the `await`; the "finished" line will
-  be logged against the right context, and the `with` block restores
-  the original context before we return to the caller.
-- If the returned awaitable is incomplete, `do_request_handling` clears
-  the logcontext before returning. The logcontext is therefore clear
-  when `handle_request` `await`s the awaitable.
-
-  Once `do_request_handling`'s awaitable completes, it will reinstate
-  the logcontext, before running the second half of `handle_request`,
-  so again the "finished" line will be logged against the right context,
-  and the `with` block restores the original context.
-
-As an aside, it's worth noting that `handle_request` follows our rules
-- though that only matters if the caller has its own logcontext which it
-cares about.
-
-The following sections describe pitfalls and helpful patterns when
-implementing these rules.
-
-Always await your awaitables
-----------------------------
-
-Whenever you get an awaitable back from a function, you should `await` on
-it as soon as possible. Do not pass go; do not do any logging; do not
-call any other functions.
+This can be fixed by using `PreserveLoggingContext`:
 ```python
-async def fun():
-    logger.debug("starting")
-    await do_some_stuff()  # just like this
-
-    coro = more_stuff()
-    result = await coro  # also fine, of course
-
-    return result
+import logging
+from synapse.logging.context import LoggingContext, PreserveLoggingContext
+
+logger = logging.getLogger(__name__)
+
+def main() -> None:
+    with LoggingContext("main"):
+        task_context = LoggingContext("task")
+
+        with PreserveLoggingContext(task_context):
+            logger.debug("foo")
+        with PreserveLoggingContext(task_context):
+            logger.debug("bar")
+
+        logger.debug("finished")  # this will be logged against main
 ```

-Provided this pattern is followed all the way back up the callchain
-to where the logcontext was set, this will make things work out OK:
-provided `do_some_stuff` and `more_stuff` follow the rules above, then
-so will `fun`.
+Or you could equivalently just manage the log context manually via
+`set_current_context`.
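A sketch of that manual equivalent, assuming `set_current_context` returns the previously active context so it can be restored afterwards (the doc names the function; the return-value behaviour is an assumption here):

```python
import logging
from synapse.logging.context import LoggingContext, set_current_context

logger = logging.getLogger(__name__)

def main() -> None:
    with LoggingContext("main"):
        task_context = LoggingContext("task")

        # Hand-rolled equivalent of `with PreserveLoggingContext(task_context):`
        # swap the context in, do the work, then swap the old one back.
        old_context = set_current_context(task_context)
        try:
            logger.debug("foo")  # logged against task_context
        finally:
            set_current_context(old_context)

        logger.debug("finished")  # logged against main again
```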
It's all too easy to forget to `await`: for instance if we forgot that
|
||||
`do_some_stuff` returned an awaitable, we might plough on regardless. This
|
||||
leads to a mess; it will probably work itself out eventually, but not
|
||||
before a load of stuff has been logged against the wrong context.
|
||||
(Normally, other things will break, more obviously, if you forget to
|
||||
`await`, so this tends not to be a major problem in practice.)
|
||||
|
||||
Of course sometimes you need to do something a bit fancier with your
|
||||
awaitable - not all code follows the linear A-then-B-then-C pattern.
|
||||
Notes on implementing more complex patterns are in later sections.
|
||||
|
||||
## Where you create a new awaitable, make it follow the rules

Most of the time, an awaitable comes from another synapse function.
Sometimes, though, we need to make up a new awaitable, or we get an
awaitable back from external code. We need to make it follow our rules.

The easy way to do it is by using `context.make_deferred_yieldable`.
Suppose we want to implement `sleep`, which returns a deferred which
will run its callbacks after a given number of seconds. That might look
like:

```python
from twisted.internet import defer, reactor


# not a logcontext-rules-compliant function
def get_sleep_deferred(seconds):
    d = defer.Deferred()
    reactor.callLater(seconds, d.callback, None)
    return d
```

That doesn't follow the rules, but we can fix it by calling it through
`context.make_deferred_yieldable`:

```python
async def sleep(seconds):
    return await context.make_deferred_yieldable(get_sleep_deferred(seconds))
```
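As a usage sketch (assuming the `sleep` helper above is called from
within an existing logcontext):

```python
async def do_thing():
    logger.debug("before sleep")  # logged against the caller's logcontext
    await sleep(1)
    # make_deferred_yieldable restored our logcontext when the deferred
    # completed, so this is still attributed to the same context.
    logger.debug("after sleep")
```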
## Fire-and-forget

Sometimes you want to fire off a chain of execution, but not wait for
its result. That might look a bit like this:

```python
async def do_request_handling():
    await foreground_operation()

    # *don't* do this
    background_operation()

    logger.debug("Request handling complete")


async def background_operation():
    await first_background_step()
    logger.debug("Completed first step")
    await second_background_step()
    logger.debug("Completed second step")
```

The above code does a couple of steps in the background after
`do_request_handling` has finished. The log lines are still logged
against the `request_context` logcontext, which may or may not be
desirable. There are two big problems with the above, however. The first
problem is that, if `background_operation` returns an incomplete
awaitable, it will expect its caller to `await` immediately, so will have
cleared the logcontext. In this example, that means that 'Request
handling complete' will be logged without any context.

The second problem, which is potentially even worse, is that when the
awaitable returned by `background_operation` completes, it will restore
the original logcontext. There is nothing waiting on that awaitable, so
the logcontext will leak into the reactor and possibly get attached to
some arbitrary future operation.

There are two potential solutions to this.

One option is to surround the call to `background_operation` with a
`PreserveLoggingContext` call. That will reset the logcontext before
starting `background_operation` (so the context restored when the
deferred completes will be the empty logcontext), and will restore the
current logcontext before continuing the foreground process:

```python
async def do_request_handling():
    await foreground_operation()

    # start background_operation off in the empty logcontext, to
    # avoid leaking the current context into the reactor.
    with PreserveLoggingContext():
        background_operation()

    # this will now be logged against the request context
    logger.debug("Request handling complete")
```

Obviously that option means that the operations done in
`background_operation` would not be logged against a logcontext
(though that might be fixed by setting a different logcontext via a
`with LoggingContext(...)` in `background_operation`).

The second option is to use `context.run_in_background`, which wraps a
function so that it doesn't reset the logcontext even when it returns
an incomplete awaitable, and adds a callback to the returned awaitable to
reset the logcontext. In other words, it turns a function that follows
the Synapse rules about logcontexts and awaitables into one which behaves
more like an external function --- the opposite operation to that
described in the previous section. To drive an awaitable in the
background, you can use it like this:

```python
async def do_request_handling():
    await foreground_operation()

    # start background_operation off, without leaking the current
    # context into the reactor.
    context.run_in_background(background_operation)

    # this will now be logged against the request context
    logger.debug("Request handling complete")
```

`background_process_metrics.run_as_background_process` also exists if you
want some automatic tracing and metrics for the background task.
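A minimal sketch of that helper in use; note that the two-argument call
below follows its historical `run_as_background_process(desc, func)`
signature, which may have grown additional arguments in newer Synapse
versions:

```python
from synapse.metrics.background_process_metrics import run_as_background_process


async def do_request_handling():
    await foreground_operation()

    # Runs background_operation under its own logcontext (named "bg-op"
    # here) and records metrics and tracing for it, without leaking our
    # current context into the reactor.
    run_as_background_process("bg-op", background_operation)

    logger.debug("Request handling complete")
```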
## Passing synapse deferreds into third-party functions

A typical example of this is where we want to collect together two or
more awaitables via `defer.gatherResults`:

```python
a1 = operation1()
a2 = operation2()
a3 = defer.gatherResults([a1, a2])
```

This is really a variation of the fire-and-forget problem above, in that
we are firing off `a1` and `a2` without awaiting on them. The difference
is that we now have third-party code attached to their callbacks. Anyway,
either technique given in the [Fire-and-forget](#fire-and-forget)
section will work.

Of course, the new awaitable returned by `gather` needs to be
wrapped in order to make it follow the logcontext rules before we can
yield it, as described in
[Where you create a new awaitable, make it follow the rules](#where-you-create-a-new-awaitable-make-it-follow-the-rules).

So, option one: reset the logcontext before starting the operations to
be gathered:

```python
async def do_request_handling():
    with PreserveLoggingContext():
        a1 = operation1()
        a2 = operation2()
        result = await defer.gatherResults([a1, a2])
```

In this case particularly, though, option two, of using
`context.run_in_background`, almost certainly makes more sense, so that
`operation1` and `operation2` are both logged against the original
logcontext. This looks like:

```python
async def do_request_handling():
    a1 = context.run_in_background(operation1)
    a2 = context.run_in_background(operation2)

    result = await make_deferred_yieldable(defer.gatherResults([a1, a2]))
```
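As an aside, for the common case of applying the same operation to a
collection of items, `synapse.util.async_helpers.yieldable_gather_results`
wraps this run_in_background/make_deferred_yieldable dance for you. A
sketch, assuming that helper's usual `(func, iterable)` calling
convention:

```python
from synapse.util.async_helpers import yieldable_gather_results


async def do_request_handling():
    # Runs `operation` on each item concurrently, following the
    # logcontext rules, and returns the list of results.
    results = await yieldable_gather_results(operation, [item1, item2])
```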
## A note on garbage-collection of awaitable chains

It turns out that our logcontext rules do not play nicely with awaitable
chains which get orphaned and garbage-collected.

Imagine we have some code that looks like this:

```python
listener_queue = []


def on_something_interesting():
    for d in listener_queue:
        d.callback("foo")


async def await_something_interesting():
    new_awaitable = defer.Deferred()
    listener_queue.append(new_awaitable)

    with PreserveLoggingContext():
        await new_awaitable
```

Obviously, the idea here is that we have a bunch of things which are
waiting for an event. (It's just an example of the problem here, but a
relatively common one.)

Now let's imagine two further things happen. First of all, whatever was
waiting for the interesting thing goes away. (Perhaps the request times
out, or something *even more* interesting happens.)

Secondly, let's suppose that we decide that the interesting thing is
never going to happen, and we reset the listener queue:

```python
def reset_listener_queue():
    listener_queue.clear()
```

So, both ends of the awaitable chain have now dropped their references,
and the awaitable chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a coroutine,
which Python implements as a generator function. When Python
garbage-collects generator functions, it gives them a chance to
clean up by making the `await` (or `yield`) raise a `GeneratorExit`
exception. In our case, that means that the `__exit__` handler of
`PreserveLoggingContext` will carefully restore the request context, but
there is now nothing waiting for its return, so the request context is
never cleared.

To reiterate, this problem only arises when *both* ends of an awaitable
chain are dropped. Dropping the reference to an awaitable you're
supposed to be awaiting is bad practice, so this doesn't actually happen
too much. Unfortunately, when it does happen, it will lead to leaked
logcontexts which are incredibly hard to track down.
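A toy illustration of the mechanism (plain Python, no Synapse involved):
garbage-collecting a suspended generator raises `GeneratorExit` at the
`await`/`yield` point, so `__exit__` handlers run with nobody left to
observe what they restore:

```python
class Ctx:
    def __enter__(self) -> None:
        print("enter")

    def __exit__(self, *exc: object) -> None:
        # runs during GC, restoring a context that nobody will clear
        print("exit")


def gen():
    with Ctx():
        yield  # suspended here when the generator is orphaned


g = gen()
next(g)  # prints "enter" and suspends at the yield
del g    # GeneratorExit is raised at the yield; "exit" is printed
```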
@@ -88,7 +88,8 @@ This will install and start a systemd service called `coturn`.
 denied-peer-ip=172.16.0.0-172.31.255.255

 # recommended additional local peers to block, to mitigate external access to internal services.
-# https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+# https://www.enablesecurity.com/blog/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+# https://www.enablesecurity.com/blog/cve-2020-26262-bypass-of-coturns-access-control-protection/#further-concerns-what-else
 no-multicast-peers
 denied-peer-ip=0.0.0.0-0.255.255.255
 denied-peer-ip=100.64.0.0-100.127.255.255
@@ -101,6 +102,14 @@ This will install and start a systemd service called `coturn`.
 denied-peer-ip=198.51.100.0-198.51.100.255
 denied-peer-ip=203.0.113.0-203.0.113.255
 denied-peer-ip=240.0.0.0-255.255.255.255
+denied-peer-ip=::1
+denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
+denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
+denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
+denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
+denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff

 # special case the turn server itself so that client->TURN->TURN->client flows work
 # this should be one of the turn server's listening IPs
@@ -4174,7 +4174,7 @@ The default power levels for each preset are:
 "m.room.history_visibility": 100
 "m.room.canonical_alias": 50
 "m.room.avatar": 50
-"m.room.tombstone": 100
+"m.room.tombstone": 100 (150 if MSC4289 is used)
 "m.room.server_acl": 100
 "m.room.encryption": 100
 ```
@@ -252,7 +252,7 @@ information.
 ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
 ^/_matrix/client/(r0|v3|unstable)/capabilities$
 ^/_matrix/client/(r0|v3|unstable)/notifications$
-^/_synapse/admin/v1/rooms/
+^/_synapse/admin/v1/rooms/[^/]+$

 # Encryption requests
 ^/_matrix/client/(r0|v3|unstable)/keys/query$
poetry.lock (generated; 261 lines changed)
@@ -441,24 +441,6 @@ files = [
     {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
 ]

-[[package]]
-name = "deprecated"
-version = "1.2.13"
-description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-groups = ["dev"]
-files = [
-    {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"},
-    {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"},
-]
-
-[package.dependencies]
-wrapt = ">=1.10,<2"
-
-[package.extras]
-dev = ["PyTest (<5) ; python_version < \"3.6\"", "PyTest ; python_version >= \"3.6\"", "PyTest-Cov (<2.6) ; python_version < \"3.6\"", "PyTest-Cov ; python_version >= \"3.6\"", "bump2version (<1)", "configparser (<5) ; python_version < \"3\"", "importlib-metadata (<3) ; python_version < \"3\"", "importlib-resources (<4) ; python_version < \"3\"", "sphinx (<2)", "sphinxcontrib-websupport (<2) ; python_version < \"3\"", "tox", "zipp (<2) ; python_version < \"3\""]

 [[package]]
 name = "docutils"
 version = "0.19"
@@ -1387,44 +1369,50 @@ docs = ["sphinx (>=8,<9)", "sphinx-autobuild"]

 [[package]]
 name = "mypy"
-version = "1.16.1"
+version = "1.17.1"
 description = "Optional static typing for Python"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"},
-    {file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"},
-    {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7469ee5902c95542bea7ee545f7006508c65c8c54b06dc2c92676ce526f3ea"},
-    {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:352025753ef6a83cb9e7f2427319bb7875d1fdda8439d1e23de12ab164179574"},
-    {file = "mypy-1.16.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff9fa5b16e4c1364eb89a4d16bcda9987f05d39604e1e6c35378a2987c1aac2d"},
-    {file = "mypy-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:1256688e284632382f8f3b9e2123df7d279f603c561f099758e66dd6ed4e8bd6"},
-    {file = "mypy-1.16.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:472e4e4c100062488ec643f6162dd0d5208e33e2f34544e1fc931372e806c0cc"},
-    {file = "mypy-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea16e2a7d2714277e349e24d19a782a663a34ed60864006e8585db08f8ad1782"},
-    {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08e850ea22adc4d8a4014651575567b0318ede51e8e9fe7a68f25391af699507"},
-    {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22d76a63a42619bfb90122889b903519149879ddbf2ba4251834727944c8baca"},
-    {file = "mypy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2c7ce0662b6b9dc8f4ed86eb7a5d505ee3298c04b40ec13b30e572c0e5ae17c4"},
-    {file = "mypy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:211287e98e05352a2e1d4e8759c5490925a7c784ddc84207f4714822f8cf99b6"},
-    {file = "mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d"},
-    {file = "mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9"},
-    {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79"},
-    {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15"},
-    {file = "mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd"},
-    {file = "mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b"},
-    {file = "mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438"},
-    {file = "mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536"},
-    {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f"},
-    {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359"},
-    {file = "mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be"},
-    {file = "mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee"},
-    {file = "mypy-1.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fc688329af6a287567f45cc1cefb9db662defeb14625213a5b7da6e692e2069"},
-    {file = "mypy-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e198ab3f55924c03ead626ff424cad1732d0d391478dfbf7bb97b34602395da"},
-    {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09aa4f91ada245f0a45dbc47e548fd94e0dd5a8433e0114917dc3b526912a30c"},
-    {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13c7cd5b1cb2909aa318a90fd1b7e31f17c50b242953e7dd58345b2a814f6383"},
-    {file = "mypy-1.16.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:58e07fb958bc5d752a280da0e890c538f1515b79a65757bbdc54252ba82e0b40"},
-    {file = "mypy-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:f895078594d918f93337a505f8add9bd654d1a24962b4c6ed9390e12531eb31b"},
-    {file = "mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37"},
-    {file = "mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab"},
+    {file = "mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972"},
+    {file = "mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7"},
+    {file = "mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df"},
+    {file = "mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390"},
+    {file = "mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94"},
+    {file = "mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b"},
+    {file = "mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58"},
+    {file = "mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5"},
+    {file = "mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd"},
+    {file = "mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b"},
+    {file = "mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5"},
+    {file = "mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b"},
+    {file = "mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb"},
+    {file = "mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403"},
+    {file = "mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056"},
+    {file = "mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341"},
+    {file = "mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb"},
+    {file = "mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19"},
+    {file = "mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7"},
+    {file = "mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81"},
+    {file = "mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6"},
+    {file = "mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849"},
+    {file = "mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14"},
+    {file = "mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a"},
+    {file = "mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733"},
+    {file = "mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd"},
+    {file = "mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0"},
+    {file = "mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a"},
+    {file = "mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91"},
+    {file = "mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed"},
+    {file = "mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9"},
+    {file = "mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99"},
+    {file = "mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8"},
+    {file = "mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8"},
+    {file = "mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259"},
+    {file = "mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d"},
+    {file = "mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9"},
+    {file = "mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01"},
 ]

 [package.dependencies]
@@ -1543,14 +1531,14 @@ files = [

 [[package]]
 name = "phonenumbers"
-version = "9.0.10"
+version = "9.0.12"
 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
 optional = false
 python-versions = "*"
 groups = ["main"]
 files = [
-    {file = "phonenumbers-9.0.10-py2.py3-none-any.whl", hash = "sha256:13b12d269be1f2b363c9bc2868656a7e2e8b50f1a1cef629c75005da6c374c6b"},
-    {file = "phonenumbers-9.0.10.tar.gz", hash = "sha256:c2d15a6a9d0534b14a7764f51246ada99563e263f65b80b0251d1a760ac4a1ba"},
+    {file = "phonenumbers-9.0.12-py2.py3-none-any.whl", hash = "sha256:900633afc3e12191458d710262df5efc117838bd1e2e613b64fa254a86bb20a1"},
+    {file = "phonenumbers-9.0.12.tar.gz", hash = "sha256:ccadff6b949494bd606836d8c9678bee5b55cb1cbad1e98bf7adae108e6fd0be"},
 ]

 [[package]]
@@ -1920,22 +1908,21 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

 [[package]]
 name = "pygithub"
-version = "2.6.1"
+version = "2.7.0"
 description = "Use the full Github API v3"
 optional = false
 python-versions = ">=3.8"
 groups = ["dev"]
 files = [
-    {file = "PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3"},
-    {file = "pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf"},
+    {file = "pygithub-2.7.0-py3-none-any.whl", hash = "sha256:40ecbfe26dc55cc34ab4b0ffa1d455e6f816ef9a2bc8d6f5ad18ce572f163700"},
+    {file = "pygithub-2.7.0.tar.gz", hash = "sha256:7cd6eafabb09b5369afba3586d86b1f1ad6f1326d2ff01bc47bb26615dce4cbb"},
 ]

 [package.dependencies]
-Deprecated = "*"
 pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
 pynacl = ">=1.4.0"
 requests = ">=2.14.0"
-typing-extensions = ">=4.0.0"
+typing-extensions = ">=4.5.0"
 urllib3 = ">=1.26.0"

 [[package]]
@@ -2409,30 +2396,31 @@ files = [

 [[package]]
 name = "ruff"
-version = "0.12.7"
+version = "0.12.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.12.7-py3-none-linux_armv6l.whl", hash = "sha256:76e4f31529899b8c434c3c1dede98c4483b89590e15fb49f2d46183801565303"},
-    {file = "ruff-0.12.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:789b7a03e72507c54fb3ba6209e4bb36517b90f1a3569ea17084e3fd295500fb"},
-    {file = "ruff-0.12.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e1c2a3b8626339bb6369116e7030a4cf194ea48f49b64bb505732a7fce4f4e3"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32dec41817623d388e645612ec70d5757a6d9c035f3744a52c7b195a57e03860"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47ef751f722053a5df5fa48d412dbb54d41ab9b17875c6840a58ec63ff0c247c"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a828a5fc25a3efd3e1ff7b241fd392686c9386f20e5ac90aa9234a5faa12c423"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5726f59b171111fa6a69d82aef48f00b56598b03a22f0f4170664ff4d8298efb"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74e6f5c04c4dd4aba223f4fe6e7104f79e0eebf7d307e4f9b18c18362124bccd"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0bfe4e77fba61bf2ccadf8cf005d6133e3ce08793bbe870dd1c734f2699a3e"},
-    {file = "ruff-0.12.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06bfb01e1623bf7f59ea749a841da56f8f653d641bfd046edee32ede7ff6c606"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e41df94a957d50083fd09b916d6e89e497246698c3f3d5c681c8b3e7b9bb4ac8"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4000623300563c709458d0ce170c3d0d788c23a058912f28bbadc6f905d67afa"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:69ffe0e5f9b2cf2b8e289a3f8945b402a1b19eff24ec389f45f23c42a3dd6fb5"},
-    {file = "ruff-0.12.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a07a5c8ffa2611a52732bdc67bf88e243abd84fe2d7f6daef3826b59abbfeda4"},
-    {file = "ruff-0.12.7-py3-none-win32.whl", hash = "sha256:c928f1b2ec59fb77dfdf70e0419408898b63998789cc98197e15f560b9e77f77"},
-    {file = "ruff-0.12.7-py3-none-win_amd64.whl", hash = "sha256:9c18f3d707ee9edf89da76131956aba1270c6348bfee8f6c647de841eac7194f"},
-    {file = "ruff-0.12.7-py3-none-win_arm64.whl", hash = "sha256:dfce05101dbd11833a0776716d5d1578641b7fddb537fe7fa956ab85d1769b69"},
-    {file = "ruff-0.12.7.tar.gz", hash = "sha256:1fc3193f238bc2d7968772c82831a4ff69252f673be371fb49663f0068b7ec71"},
+    {file = "ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b"},
+    {file = "ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1"},
+    {file = "ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9"},
+    {file = "ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9"},
+    {file = "ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b"},
+    {file = "ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266"},
+    {file = "ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e"},
+    {file = "ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc"},
+    {file = "ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9"},
 ]

 [[package]]
@@ -2890,14 +2878,14 @@ twisted = "*"

 [[package]]
 name = "types-bleach"
-version = "6.2.0.20250514"
+version = "6.2.0.20250809"
 description = "Typing stubs for bleach"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_bleach-6.2.0.20250514-py3-none-any.whl", hash = "sha256:380cb74f0db1e3c3b2e0cde217221108e975e07e95ef0970c9d41f7cd4e8ea3c"},
-    {file = "types_bleach-6.2.0.20250514.tar.gz", hash = "sha256:38c2e51d9cac51dc70c1b66121a11f4dad8bbf47fbad494bb7a77d8b8f3c4323"},
+    {file = "types_bleach-6.2.0.20250809-py3-none-any.whl", hash = "sha256:0b372a75117947d9ac8a31ae733fd0f8d92ec75c4772e7b37093ba3fa5b48fb9"},
+    {file = "types_bleach-6.2.0.20250809.tar.gz", hash = "sha256:188d7a1119f6c953140b513ed57ba4213755695815472c19d0c22ac09c79b90b"},
 ]

 [package.dependencies]
@@ -2932,14 +2920,14 @@ files = [

 [[package]]
 name = "types-jsonschema"
-version = "4.25.0.20250720"
+version = "4.25.1.20250822"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_jsonschema-4.25.0.20250720-py3-none-any.whl", hash = "sha256:7d7897c715310d8bf9ae27a2cedba78bbb09e4cad83ce06d2aa79b73a88941df"},
-    {file = "types_jsonschema-4.25.0.20250720.tar.gz", hash = "sha256:765a3b6144798fe3161fd8cbe570a756ed3e8c0e5adb7c09693eb49faad39dbd"},
+    {file = "types_jsonschema-4.25.1.20250822-py3-none-any.whl", hash = "sha256:f82c2d7fa1ce1c0b84ba1de4ed6798469768188884db04e66421913a4e181294"},
+    {file = "types_jsonschema-4.25.1.20250822.tar.gz", hash = "sha256:aac69ed4b23f49aaceb7fcb834141d61b9e4e6a7f6008cb2f0d3b831dfa8464a"},
 ]

 [package.dependencies]
@@ -2983,14 +2971,14 @@ files = [

 [[package]]
 name = "types-psycopg2"
-version = "2.9.21.20250718"
+version = "2.9.21.20250809"
 description = "Typing stubs for psycopg2"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_psycopg2-2.9.21.20250718-py3-none-any.whl", hash = "sha256:bcf085d4293bda48f5943a46dadf0389b2f98f7e8007722f7e1c12ee0f541858"},
-    {file = "types_psycopg2-2.9.21.20250718.tar.gz", hash = "sha256:dc09a97272ef67e739e57b9f4740b761208f4514257e311c0b05c8c7a37d04b4"},
+    {file = "types_psycopg2-2.9.21.20250809-py3-none-any.whl", hash = "sha256:59b7b0ed56dcae9efae62b8373497274fc1a0484bdc5135cdacbe5a8f44e1d7b"},
+    {file = "types_psycopg2-2.9.21.20250809.tar.gz", hash = "sha256:b7c2cbdcf7c0bd16240f59ba694347329b0463e43398de69784ea4dee45f3c6d"},
 ]

 [[package]]
@@ -3011,14 +2999,14 @@ types-cffi = "*"

 [[package]]
 name = "types-pyyaml"
-version = "6.0.12.20250516"
+version = "6.0.12.20250809"
 description = "Typing stubs for PyYAML"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"},
-    {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"},
+    {file = "types_pyyaml-6.0.12.20250809-py3-none-any.whl", hash = "sha256:032b6003b798e7de1a1ddfeefee32fac6486bdfe4845e0ae0e7fb3ee4512b52f"},
+    {file = "types_pyyaml-6.0.12.20250809.tar.gz", hash = "sha256:af4a1aca028f18e75297da2ee0da465f799627370d74073e96fee876524f61b5"},
 ]

 [[package]]
@@ -3038,14 +3026,14 @@ urllib3 = ">=2"

 [[package]]
 name = "types-setuptools"
-version = "80.9.0.20250529"
+version = "80.9.0.20250809"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_setuptools-80.9.0.20250529-py3-none-any.whl", hash = "sha256:00dfcedd73e333a430e10db096e4d46af93faf9314f832f13b6bbe3d6757e95f"},
-    {file = "types_setuptools-80.9.0.20250529.tar.gz", hash = "sha256:79e088ba0cba2186c8d6499cbd3e143abb142d28a44b042c28d3148b1e353c91"},
+    {file = "types_setuptools-80.9.0.20250809-py3-none-any.whl", hash = "sha256:7c6539b4c7ac7b4ab4db2be66d8a58fb1e28affa3ee3834be48acafd94f5976a"},
+    {file = "types_setuptools-80.9.0.20250809.tar.gz", hash = "sha256:e986ba37ffde364073d76189e1d79d9928fb6f5278c7d07589cde353d0218864"},
 ]

 [[package]]
@@ -3117,91 +3105,6 @@ files = [
     {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
 ]

-[[package]]
-name = "wrapt"
-version = "1.15.0"
-description = "Module for decorators, wrappers and monkey patching."
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-groups = ["dev"]
-files = [
-    {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
-    {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
-    {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
-    {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
-    {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
-    {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
-    {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
-    {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
-    {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
-    {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
-    {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
-    {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
-    {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
-    {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
-    {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
-    {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
-    {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
-    {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
-    {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
-    {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
-    {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
-    {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
-    {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
-    {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
-    {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
-    {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
-    {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
-    {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
-    {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
-    {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
-    {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
-    {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
-    {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
-    {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
-    {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
-    {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
-    {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
-    {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
-    {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
-    {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
-    {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
-    {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
-    {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
-    {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
-    {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
-    {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
-    {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
-    {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
-    {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
-    {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
-    {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
-    {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
-    {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
-    {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
-    {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
-    {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
-    {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
-    {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
-    {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
-    {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
-    {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
-    {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
-    {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
-    {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
-    {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
-    {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
-    {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
-    {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
-    {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
-    {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
-    {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
-    {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
-    {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
-    {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
-    {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
-]
-
 [[package]]
 name = "xmlschema"
 version = "2.4.0"
@@ -3353,4 +3256,4 @@ url-preview = ["lxml"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.9.0"
-content-hash = "600a349d08dde732df251583094a121b5385eb43ae0c6ceff10dcf9749359446"
+content-hash = "2e8ea085e1a0c6f0ac051d4bc457a96827d01f621b1827086de01a5ffa98cf79"
@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.136.0rc1"
+version = "1.138.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -324,7 +324,7 @@ all = [
 # failing on new releases. Keeping lower bounds loose here means that dependabot
 # can bump versions without having to update the content-hash in the lockfile.
 # This helps prevents merge conflicts when running a batch of dependabot updates.
-ruff = "0.12.7"
+ruff = "0.12.10"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
@@ -1,5 +1,5 @@
 $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
-$id: https://element-hq.github.io/synapse/schema/synapse/v1.136/synapse-config.schema.json
+$id: https://element-hq.github.io/synapse/schema/synapse/v1.138/synapse-config.schema.json
 type: object
 properties:
   modules:
@@ -5184,7 +5184,7 @@ properties:

       "m.room.avatar": 50

-      "m.room.tombstone": 100
+      "m.room.tombstone": 100 (150 if MSC4289 is used)

       "m.room.server_acl": 100
@@ -86,12 +86,5 @@ import synapse.util  # noqa: E402

 __version__ = synapse.util.SYNAPSE_VERSION

-if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
-    # We import here so that we don't have to install a bunch of deps when
-    # running the packaging tox test.
-    from synapse.util.patch_inline_callbacks import do_patch
-
-    do_patch()
-
 check_rust_lib_up_to_date()
@@ -153,9 +153,13 @@ def get_registered_paths_for_default(
     """

     hs = MockHomeserver(base_config, worker_app)

     # TODO We only do this to avoid an error, but don't need the database etc
     hs.setup()
-    return get_registered_paths_for_hs(hs)
+    registered_paths = get_registered_paths_for_hs(hs)
+    hs.cleanup()
+
+    return registered_paths


 def elide_http_methods_if_unconflicting(
@@ -30,6 +30,7 @@ from typing import Any, Callable, Dict, Optional

 import requests
 import yaml
+from typing_extensions import Never

 _CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
 Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path'
@@ -40,6 +41,10 @@ _NO_SHARED_SECRET_OPTS_ERROR = """\
 No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
 """

+_EMPTY_SHARED_SECRET_PATH_OPTS_ERROR = """\
+The secret given via `registration_shared_secret_path` must not be empty.
+"""
+
 _DEFAULT_SERVER_URL = "http://localhost:8008"


@@ -170,6 +175,12 @@ def register_new_user(
     )


+def bail(err_msg: str) -> Never:
+    """Prints the given message to stderr and exits."""
+    print(err_msg, file=sys.stderr)
+    sys.exit(1)
+
+
 def main() -> None:
     logging.captureWarnings(True)

@@ -262,15 +273,20 @@ def main() -> None:
     assert config is not None

     secret = config.get("registration_shared_secret")
+    if not isinstance(secret, (str, type(None))):
+        bail("registration_shared_secret is not a string.")
     secret_file = config.get("registration_shared_secret_path")
-    if secret_file:
-        if secret:
-            print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
-            sys.exit(1)
+    if not isinstance(secret_file, (str, type(None))):
+        bail("registration_shared_secret_path is not a string.")
+
+    if not secret and not secret_file:
+        bail(_NO_SHARED_SECRET_OPTS_ERROR)
+    elif secret and secret_file:
+        bail(_CONFLICTING_SHARED_SECRET_OPTS_ERROR)
+    elif not secret and secret_file:
         secret = _read_file(secret_file, "registration_shared_secret_path").strip()
-        if not secret:
-            print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
-            sys.exit(1)
+        if not secret:
+            bail(_EMPTY_SHARED_SECRET_PATH_OPTS_ERROR)

     if args.password_file:
         password = _read_file(args.password_file, "password-file").strip()
@@ -99,6 +99,7 @@ from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor
 from synapse.util import SYNAPSE_VERSION, Clock
+from synapse.util.stringutils import random_string

 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -323,6 +324,7 @@ class MockHomeserver:
         self.config = config
         self.hostname = config.server.server_name
         self.version_string = SYNAPSE_VERSION
+        self.instance_id = random_string(5)

     def get_clock(self) -> Clock:
         return self.clock
@@ -330,6 +332,9 @@ class MockHomeserver:
     def get_reactor(self) -> ISynapseReactor:
         return reactor

+    def get_instance_id(self) -> str:
+        return self.instance_id
+
     def get_instance_name(self) -> str:
         return "master"

@@ -685,7 +690,15 @@ class Porter:
         )
         prepare_database(db_conn, engine, config=self.hs_config)
         # Type safety: ignore that we're using Mock homeservers here.
-        store = Store(DatabasePool(hs, db_config, engine), db_conn, hs)  # type: ignore[arg-type]
+        store = Store(
+            DatabasePool(
+                hs,  # type: ignore[arg-type]
+                db_config,
+                engine,
+            ),
+            db_conn,
+            hs,  # type: ignore[arg-type]
+        )
         db_conn.commit()

         return store
@@ -13,7 +13,7 @@
#
#
import logging
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING, Optional, Set
from urllib.parse import urlencode

from synapse._pydantic_compat import (
@@ -57,8 +57,10 @@ logger = logging.getLogger(__name__)

# Scope as defined by MSC2967
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"


class ServerMetadata(BaseModel):
@@ -334,7 +336,10 @@ class MasDelegatedAuth(BaseAuth):
        scope = introspection_result.get_scope_set()

        # Determine type of user based on presence of particular scopes
        if SCOPE_MATRIX_API not in scope:
        if (
            UNSTABLE_SCOPE_MATRIX_API not in scope
            and STABLE_SCOPE_MATRIX_API not in scope
        ):
            raise InvalidClientTokenError(
                "Token doesn't grant access to the Matrix C-S API"
            )
@@ -366,11 +371,12 @@ class MasDelegatedAuth(BaseAuth):
        # We only allow a single device_id in the scope, so we find them all in the
        # scope list, and raise if there are more than one. The OIDC server should be
        # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
        device_ids = [
            tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
            for tok in scope
            if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
        ]
        device_ids: Set[str] = set()
        for tok in scope:
            if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
                device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
            elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
                device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])

        if len(device_ids) > 1:
            raise AuthError(
@@ -378,7 +384,7 @@ class MasDelegatedAuth(BaseAuth):
                "Multiple device IDs in scope",
            )

        device_id = device_ids[0] if device_ids else None
        device_id = next(iter(device_ids), None)

        if device_id is not None:
            # Sanity check the device_id
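Since tokens may now carry either the unstable MSC2967 URNs or the stable `urn:matrix:client:*` forms, both prefixes have to be checked when pulling the device ID out of the scope set. A standalone sketch of that parsing, reusing the prefix values from the diff:

    from typing import Optional, Set

    UNSTABLE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
    STABLE_PREFIX = "urn:matrix:client:device:"

    def device_id_from_scope(scope: Set[str]) -> Optional[str]:
        """Extract at most one device ID, accepting stable and unstable URNs."""
        device_ids: Set[str] = set()
        for tok in scope:
            for prefix in (UNSTABLE_PREFIX, STABLE_PREFIX):
                if tok.startswith(prefix):
                    device_ids.add(tok[len(prefix):])
        if len(device_ids) > 1:
            raise ValueError("Multiple device IDs in scope")
        # A set de-duplicates the case where both URN forms name the same device.
        return next(iter(device_ids), None)

    assert device_id_from_scope(
        {"urn:matrix:client:api:*", "urn:matrix:client:device:ABCDEF"}
    ) == "ABCDEF"

Switching from a list to a `Set` is what makes a token that names the same device under both URN forms count as a single device rather than a conflict, and `next(iter(device_ids), None)` is the set-friendly replacement for `device_ids[0]`.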
@@ -20,7 +20,7 @@
#
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode

from authlib.oauth2 import ClientAuth
@@ -34,7 +34,6 @@ from synapse.api.errors import (
    AuthError,
    HttpResponseException,
    InvalidClientTokenError,
    OAuthInsufficientScopeError,
    SynapseError,
    UnrecognizedRequestError,
)
@@ -63,9 +62,10 @@ logger = logging.getLogger(__name__)

# Scope as defined by MSC2967
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
UNSTABLE_SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
STABLE_SCOPE_MATRIX_API = "urn:matrix:client:api:*"
STABLE_SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:client:device:"

# Scope which allows access to the Synapse admin API
SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"
@@ -444,9 +444,6 @@ class MSC3861DelegatedAuth(BaseAuth):
        if not self._is_access_token_the_admin_token(access_token):
            await self._record_request(request, requester)

        if not allow_guest and requester.is_guest:
            raise OAuthInsufficientScopeError([SCOPE_MATRIX_API])

        request.requester = requester

        return requester
@@ -528,10 +525,11 @@ class MSC3861DelegatedAuth(BaseAuth):
        scope: List[str] = introspection_result.get_scope_list()

        # Determine type of user based on presence of particular scopes
        has_user_scope = SCOPE_MATRIX_API in scope
        has_guest_scope = SCOPE_MATRIX_GUEST in scope
        has_user_scope = (
            UNSTABLE_SCOPE_MATRIX_API in scope or STABLE_SCOPE_MATRIX_API in scope
        )

        if not has_user_scope and not has_guest_scope:
        if not has_user_scope:
            raise InvalidClientTokenError("No scope in token granting user rights")

        # Match via the sub claim
@@ -579,11 +577,12 @@ class MSC3861DelegatedAuth(BaseAuth):
        # We only allow a single device_id in the scope, so we find them all in the
        # scope list, and raise if there are more than one. The OIDC server should be
        # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
        device_ids = [
            tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
            for tok in scope
            if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
        ]
        device_ids: Set[str] = set()
        for tok in scope:
            if tok.startswith(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX):
                device_ids.add(tok[len(UNSTABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])
            elif tok.startswith(STABLE_SCOPE_MATRIX_DEVICE_PREFIX):
                device_ids.add(tok[len(STABLE_SCOPE_MATRIX_DEVICE_PREFIX) :])

        if len(device_ids) > 1:
            raise AuthError(
@@ -591,7 +590,7 @@ class MSC3861DelegatedAuth(BaseAuth):
                "Multiple device IDs in scope",
            )

        device_id = device_ids[0] if device_ids else None
        device_id = next(iter(device_ids), None)

        if device_id is not None:
            # Sanity check the device_id
@@ -617,5 +616,4 @@ class MSC3861DelegatedAuth(BaseAuth):
            user_id=user_id,
            device_id=device_id,
            scope=scope,
            is_guest=(has_guest_scope and not has_user_scope),
        )
@@ -46,6 +46,9 @@ MAX_USERID_LENGTH = 255
# Constant value used for the pseudo-thread which is the main timeline.
MAIN_TIMELINE: Final = "main"

# MAX_INT + 1, so it always trumps any PL in canonical JSON.
CREATOR_POWER_LEVEL = 2**53


class Membership:
    """Represents the membership states of a user in a room."""
@@ -235,6 +238,8 @@ class EventContentFields:
    #
    # This is deprecated in MSC2175.
    ROOM_CREATOR: Final = "creator"
    # MSC4289
    ADDITIONAL_CREATORS: Final = "additional_creators"

    # The version of the room for `m.room.create` events.
    ROOM_VERSION: Final = "room_version"
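The "MAX_INT + 1" comment refers to the canonical JSON rules used for Matrix events, which only permit integers in the range [-(2**53)+1, (2**53)-1]. No power-level event can therefore legitimately contain a value as large as 2**53, so picking that value guarantees a creator outranks every representable power level. A quick check of the arithmetic:

    # Canonical JSON integer bounds per the Matrix spec.
    CANONICAL_JSON_MAX_INT = 2**53 - 1

    CREATOR_POWER_LEVEL = 2**53
    assert CREATOR_POWER_LEVEL > CANONICAL_JSON_MAX_INT  # trumps any encodable PL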
@@ -36,12 +36,14 @@ class EventFormatVersions:
    ROOM_V1_V2 = 1  # $id:server event id format: used for room v1 and v2
    ROOM_V3 = 2  # MSC1659-style $hash event id format: used for room v3
    ROOM_V4_PLUS = 3  # MSC1884-style $hash format: introduced for room v4
    ROOM_V11_HYDRA_PLUS = 4  # MSC4291 room IDs as hashes: introduced for room HydraV11


KNOWN_EVENT_FORMAT_VERSIONS = {
    EventFormatVersions.ROOM_V1_V2,
    EventFormatVersions.ROOM_V3,
    EventFormatVersions.ROOM_V4_PLUS,
    EventFormatVersions.ROOM_V11_HYDRA_PLUS,
}


@@ -50,6 +52,7 @@ class StateResolutionVersions:

    V1 = 1  # room v1 state res
    V2 = 2  # MSC1442 state res: room v2 and later
    V2_1 = 3  # MSC4297 state res


class RoomDisposition:
@@ -109,6 +112,10 @@ class RoomVersion:
    msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag
    # MSC3757: Restricting who can overwrite a state event
    msc3757_enabled: bool
    # MSC4289: Creator power enabled
    msc4289_creator_power_enabled: bool
    # MSC4291: Room IDs as hashes of the create event
    msc4291_room_ids_as_hashes: bool


class RoomVersions:
@@ -131,6 +138,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V2 = RoomVersion(
        "2",
@@ -151,6 +160,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V3 = RoomVersion(
        "3",
@@ -171,6 +182,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V4 = RoomVersion(
        "4",
@@ -191,6 +204,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V5 = RoomVersion(
        "5",
@@ -211,6 +226,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V6 = RoomVersion(
        "6",
@@ -231,6 +248,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V7 = RoomVersion(
        "7",
@@ -251,6 +270,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V8 = RoomVersion(
        "8",
@@ -271,6 +292,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V9 = RoomVersion(
        "9",
@@ -291,6 +314,8 @@ class RoomVersions:
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V10 = RoomVersion(
        "10",
@@ -311,6 +336,8 @@ class RoomVersions:
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
@@ -332,6 +359,8 @@ class RoomVersions:
        enforce_int_power_levels=True,
        msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    MSC3757v10 = RoomVersion(
        # MSC3757 (Restricting who can overwrite a state event) based on room version "10"
@@ -353,6 +382,8 @@ class RoomVersions:
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=True,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    V11 = RoomVersion(
        "11",
@@ -373,6 +404,8 @@ class RoomVersions:
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    MSC3757v11 = RoomVersion(
        # MSC3757 (Restricting who can overwrite a state event) based on room version "11"
@@ -394,6 +427,52 @@ class RoomVersions:
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=True,
        msc4289_creator_power_enabled=False,
        msc4291_room_ids_as_hashes=False,
    )
    HydraV11 = RoomVersion(
        "org.matrix.hydra.11",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V11_HYDRA_PLUS,
        StateResolutionVersions.V2_1,  # Changed from v11
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=True,  # Used by MSC3820
        updated_redaction_rules=True,  # Used by MSC3820
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=True,
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=True,  # Changed from v11
        msc4291_room_ids_as_hashes=True,  # Changed from v11
    )
    V12 = RoomVersion(
        "12",
        RoomDisposition.STABLE,
        EventFormatVersions.ROOM_V11_HYDRA_PLUS,
        StateResolutionVersions.V2_1,  # Changed from v11
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=True,  # Used by MSC3820
        updated_redaction_rules=True,  # Used by MSC3820
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=True,
        enforce_int_power_levels=True,
        msc3931_push_features=(),
        msc3757_enabled=False,
        msc4289_creator_power_enabled=True,  # Changed from v11
        msc4291_room_ids_as_hashes=True,  # Changed from v11
    )


@@ -411,8 +490,10 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        RoomVersions.V9,
        RoomVersions.V10,
        RoomVersions.V11,
        RoomVersions.V12,
        RoomVersions.MSC3757v10,
        RoomVersions.MSC3757v11,
        RoomVersions.HydraV11,
    )
}
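Every pre-Hydra room version simply gains the two new flags set to False, while HydraV11 and v12 switch them on alongside the new event format and state-resolution versions. A short sketch of how calling code can branch on the capability flags, assuming (as elsewhere in Synapse) that KNOWN_ROOM_VERSIONS is keyed by version identifier:

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    v12 = KNOWN_ROOM_VERSIONS["12"]
    assert v12.msc4291_room_ids_as_hashes       # room ID derived from the create event
    assert v12.msc4289_creator_power_enabled    # creators get the infinite power level
    assert not KNOWN_ROOM_VERSIONS["11"].msc4291_room_ids_as_hashes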
@@ -601,6 +601,12 @@ async def start(hs: "HomeServer") -> None:
        hs.get_datastores().main.db_pool.start_profiling()
        hs.get_pusherpool().start()

        # Register background tasks required by this server. This must be done
        # somewhat manually due to the background tasks not being registered
        # unless handlers are instantiated.
        if hs.config.worker.run_background_tasks:
            hs.start_background_tasks()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", logger.info, "Shutting down..."
@@ -535,11 +535,15 @@ class ExperimentalConfig(Config):
            "msc4108_delegation_endpoint", None
        )

        auth_delegated = self.msc3861.enabled or (
            config.get("matrix_authentication_service") or {}
        ).get("enabled", False)

        if (
            self.msc4108_enabled or self.msc4108_delegation_endpoint is not None
        ) and not self.msc3861.enabled:
        ) and not auth_delegated:
            raise ConfigError(
                "MSC4108 requires MSC3861 to be enabled",
                "MSC4108 requires MSC3861 or matrix_authentication_service to be enabled",
                ("experimental", "msc4108_delegation_endpoint"),
            )
@@ -101,6 +101,9 @@ def compute_content_hash(
    event_dict.pop("outlier", None)
    event_dict.pop("destinations", None)

    # N.B. no need to pop the room_id from create events in MSC4291 rooms
    # as they shouldn't have one.

    event_json_bytes = encode_canonical_json(event_dict)

    hashed = hash_algorithm(event_json_bytes)
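The content hash is just the chosen digest over the canonical JSON of the event with transient and server-internal fields stripped, which is why a create event that never carries a `room_id` needs no special casing. A minimal sketch of the same shape (the exact set of popped fields is an assumption based on the context visible above):

    import hashlib
    from canonicaljson import encode_canonical_json

    def content_hash_sketch(event_dict: dict) -> bytes:
        """Hash an event's canonical JSON after dropping non-hashed fields."""
        scratch = dict(event_dict)
        for key in ("signatures", "unsigned", "hashes", "outlier", "destinations"):
            scratch.pop(key, None)
        return hashlib.sha256(encode_canonical_json(scratch)).digest()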
@@ -45,6 +45,7 @@ from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64

from synapse.api.constants import (
    CREATOR_POWER_LEVEL,
    MAX_PDU_SIZE,
    EventContentFields,
    EventTypes,
@@ -64,6 +65,7 @@ from synapse.api.room_versions import (
    RoomVersion,
    RoomVersions,
)
from synapse.events import is_creator
from synapse.state import CREATE_KEY
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
@@ -261,7 +263,8 @@ async def check_state_independent_auth_rules(
                f"Event {event.event_id} has unexpected auth_event for {k}: {auth_event_id}",
            )

        # We also need to check that the auth event itself is not rejected.
        # 2.3 ... If there are entries which were themselves rejected under the checks performed on receipt
        # of a PDU, reject.
        if auth_event.rejected_reason:
            raise AuthError(
                403,
@@ -271,7 +274,7 @@ async def check_state_independent_auth_rules(

        auth_dict[k] = auth_event_id

    # 3. If event does not have a m.room.create in its auth_events, reject.
    # 2.4. If event does not have a m.room.create in its auth_events, reject.
    creation_event = auth_dict.get((EventTypes.Create, ""), None)
    if not creation_event:
        raise AuthError(403, "No create event in auth events")
@@ -311,13 +314,14 @@ def check_state_dependent_auth_rules(

    # Later code relies on there being a create event e.g _can_federate, _is_membership_change_allowed
    # so produce a more intelligible error if we don't have one.
    if auth_dict.get(CREATE_KEY) is None:
    create_event = auth_dict.get(CREATE_KEY)
    if create_event is None:
        raise AuthError(
            403, f"Event {event.event_id} is missing a create event in auth_events."
        )

    # additional check for m.federate
    creating_domain = get_domain_from_id(event.room_id)
    creating_domain = get_domain_from_id(create_event.sender)
    originating_domain = get_domain_from_id(event.sender)
    if creating_domain != originating_domain:
        if not _can_federate(event, auth_dict):
@@ -470,12 +474,20 @@ def _check_create(event: "EventBase") -> None:
    if event.prev_event_ids():
        raise AuthError(403, "Create event has prev events")

    # 1.2 If the domain of the room_id does not match the domain of the sender,
    # reject.
    sender_domain = get_domain_from_id(event.sender)
    room_id_domain = get_domain_from_id(event.room_id)
    if room_id_domain != sender_domain:
        raise AuthError(403, "Creation event's room_id domain does not match sender's")
    if event.room_version.msc4291_room_ids_as_hashes:
        # 1.2 If the create event has a room_id, reject
        if "room_id" in event:
            raise AuthError(403, "Create event has a room_id")
    else:
        # 1.2 If the domain of the room_id does not match the domain of the sender,
        # reject.
        if not event.room_version.msc4291_room_ids_as_hashes:
            sender_domain = get_domain_from_id(event.sender)
            room_id_domain = get_domain_from_id(event.room_id)
            if room_id_domain != sender_domain:
                raise AuthError(
                    403, "Creation event's room_id domain does not match sender's"
                )

    # 1.3 If content.room_version is present and is not a recognised version, reject
    room_version_prop = event.content.get("room_version", "1")
@@ -492,6 +504,16 @@ def _check_create(event: "EventBase") -> None:
    ):
        raise AuthError(403, "Create event lacks a 'creator' property")

    # 1.5 If the additional_creators field is present and is not an array of strings where each
    # string is a valid user ID, reject.
    if (
        event.room_version.msc4289_creator_power_enabled
        and EventContentFields.ADDITIONAL_CREATORS in event.content
    ):
        check_valid_additional_creators(
            event.content[EventContentFields.ADDITIONAL_CREATORS]
        )


def _can_federate(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool:
    creation_event = auth_events.get((EventTypes.Create, ""))
@@ -533,7 +555,13 @@ def _is_membership_change_allowed(

    target_user_id = event.state_key

    creating_domain = get_domain_from_id(event.room_id)
    # We need the create event in order to check if we can federate or not.
    # If it's missing, yell loudly. Previously we only did this inside the
    # _can_federate check.
    create_event = auth_events.get((EventTypes.Create, ""))
    if not create_event:
        raise AuthError(403, "Create event missing from auth_events")
    creating_domain = get_domain_from_id(create_event.sender)
    target_domain = get_domain_from_id(target_user_id)
    if creating_domain != target_domain:
        if not _can_federate(event, auth_events):
@@ -903,6 +931,32 @@ def _check_power_levels(
        except Exception:
            raise SynapseError(400, "Not a valid power level: %s" % (v,))

    if room_version_obj.msc4289_creator_power_enabled:
        # Enforce the creator does not appear in the users map
        create_event = auth_events.get((EventTypes.Create, ""))
        if not create_event:
            raise SynapseError(
                400, "Cannot check power levels without a create event in auth_events"
            )
        if create_event.sender in user_list:
            raise SynapseError(
                400,
                "Creator user %s must not appear in content.users"
                % (create_event.sender,),
            )
        additional_creators = create_event.content.get(
            EventContentFields.ADDITIONAL_CREATORS, []
        )
        if additional_creators:
            creators_in_user_list = set(additional_creators).intersection(
                set(user_list)
            )
            if len(creators_in_user_list) > 0:
                raise SynapseError(
                    400,
                    "Additional creators users must not appear in content.users",
                )

    # Reject events with stringy power levels if required by room version
    if (
        event.type == EventTypes.PowerLevels
@@ -1028,6 +1082,9 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
            "A create event in the auth events chain is required to calculate user power level correctly,"
            " but was not found. This indicates a bug"
        )
    if create_event.room_version.msc4289_creator_power_enabled:
        if is_creator(create_event, user_id):
            return CREATOR_POWER_LEVEL
    power_level_event = get_power_level_event(auth_events)
    if power_level_event:
        level = power_level_event.content.get("users", {}).get(user_id)
@@ -1188,3 +1245,26 @@ def auth_types_for_event(
            auth_types.add(key)

    return auth_types


def check_valid_additional_creators(additional_creators: Any) -> None:
    """Check if the additional_creators provided is valid according to MSC4289.

    The additional_creators can be supplied from an m.room.create event or from an /upgrade request.

    Raises:
        AuthError if the additional_creators is invalid for some reason.
    """
    if type(additional_creators) is not list:
        raise AuthError(400, "additional_creators must be an array")
    for entry in additional_creators:
        if type(entry) is not str:
            raise AuthError(400, "entry in additional_creators is not a string")
        if not UserID.is_valid(entry):
            raise AuthError(400, "entry in additional_creators is not a valid user ID")
        # UserID.is_valid doesn't actually validate everything, so check the rest manually.
        if len(entry) > 255 or len(entry.encode("utf-8")) > 255:
            raise AuthError(
                400,
                "entry in additional_creators too long",
            )
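In practice the validator accepts only a JSON array of well-formed, length-bounded user IDs; anything else is rejected before the event is accepted. An illustration of inputs it would accept or refuse, based on the checks above:

    # Accepted: a list of valid user IDs within the 255-byte limit.
    check_valid_additional_creators(["@alice:example.org", "@bob:example.org"])

    # Each of these would raise a 400 AuthError:
    # check_valid_additional_creators("@alice:example.org")   # not a list
    # check_valid_additional_creators([42])                   # entry not a string
    # check_valid_additional_creators(["not-a-user-id"])      # malformed user ID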
@@ -41,10 +41,13 @@ from typing import (
import attr
from unpaddedbase64 import encode_base64

from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.synapse_rust.events import EventInternalMetadata
from synapse.types import JsonDict, StrCollection
from synapse.types import (
    JsonDict,
    StrCollection,
)
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze

@@ -209,7 +212,6 @@ class EventBase(metaclass=abc.ABCMeta):
    content: DictProperty[JsonDict] = DictProperty("content")
    hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
    origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
    room_id: DictProperty[str] = DictProperty("room_id")
    sender: DictProperty[str] = DictProperty("sender")
    # TODO state_key should be Optional[str]. This is generally asserted in Synapse
    # by calling is_state() first (which ensures it is not None), but it is hard (not possible?)
@@ -224,6 +226,10 @@ class EventBase(metaclass=abc.ABCMeta):
    def event_id(self) -> str:
        raise NotImplementedError()

    @property
    def room_id(self) -> str:
        raise NotImplementedError()

    @property
    def membership(self) -> str:
        return self.content["membership"]
@@ -386,6 +392,10 @@ class FrozenEvent(EventBase):
    def event_id(self) -> str:
        return self._event_id

    @property
    def room_id(self) -> str:
        return self._dict["room_id"]


class FrozenEventV2(EventBase):
    format_version = EventFormatVersions.ROOM_V3  # All events of this type are V2
@@ -443,6 +453,10 @@ class FrozenEventV2(EventBase):
        self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
        return self._event_id

    @property
    def room_id(self) -> str:
        return self._dict["room_id"]

    def prev_event_ids(self) -> List[str]:
        """Returns the list of prev event IDs. The order matches the order
        specified in the event, though there is no meaning to it.
@@ -481,6 +495,67 @@ class FrozenEventV3(FrozenEventV2):
        return self._event_id


class FrozenEventV4(FrozenEventV3):
    """FrozenEventV4 for MSC4291 room IDs are hashes"""

    format_version = EventFormatVersions.ROOM_V11_HYDRA_PLUS

    """Override the room_id for m.room.create events"""

    def __init__(
        self,
        event_dict: JsonDict,
        room_version: RoomVersion,
        internal_metadata_dict: Optional[JsonDict] = None,
        rejected_reason: Optional[str] = None,
    ):
        super().__init__(
            event_dict=event_dict,
            room_version=room_version,
            internal_metadata_dict=internal_metadata_dict,
            rejected_reason=rejected_reason,
        )
        self._room_id: Optional[str] = None

    @property
    def room_id(self) -> str:
        # if we have calculated the room ID already, don't do it again.
        if self._room_id:
            return self._room_id

        is_create_event = self.type == EventTypes.Create and self.get_state_key() == ""

        # for non-create events: use the supplied value from the JSON, as per FrozenEventV3
        if not is_create_event:
            self._room_id = self._dict["room_id"]
            assert self._room_id is not None
            return self._room_id

        # for create events: calculate the room ID
        from synapse.crypto.event_signing import compute_event_reference_hash

        self._room_id = "!" + encode_base64(
            compute_event_reference_hash(self)[1], urlsafe=True
        )
        return self._room_id

    def auth_event_ids(self) -> StrCollection:
        """Returns the list of auth event IDs. The order matches the order
        specified in the event, though there is no meaning to it.
        Returns:
            The list of event IDs of this event's auth_events
        Includes the creation event ID for convenience of all the codepaths
        which expects the auth chain to include the creator ID, even though
        it's explicitly not included on the wire. Excludes the create event
        for the create event itself.
        """
        create_event_id = "$" + self.room_id[1:]
        assert create_event_id not in self._dict["auth_events"]
        if self.type == EventTypes.Create and self.get_state_key() == "":
            return self._dict["auth_events"]  # should be []
        return self._dict["auth_events"] + [create_event_id]


def _event_type_from_format_version(
    format_version: int,
) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
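Under MSC4291 the room ID and the create event ID are the same hash with different sigils, so either can be derived from the other with simple string surgery, exactly as `auth_event_ids` does above. A small sketch of both directions:

    def room_id_from_create_event_id(create_event_id: str) -> str:
        """MSC4291: '$<hash>' for the create event becomes '!<hash>' for the room."""
        assert create_event_id.startswith("$")
        return "!" + create_event_id[1:]

    def create_event_id_from_room_id(room_id: str) -> str:
        """The inverse: the create event is addressable directly from the room ID."""
        assert room_id.startswith("!")
        return "$" + room_id[1:]

    assert create_event_id_from_room_id("!AbCd123") == "$AbCd123"

This is why other events in such rooms can omit the create event from their wire-format `auth_events`: the room ID already pins it down, and the event class adds it back in memory.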
@@ -500,6 +575,8 @@ def _event_type_from_format_version(
        return FrozenEventV2
    elif format_version == EventFormatVersions.ROOM_V4_PLUS:
        return FrozenEventV3
    elif format_version == EventFormatVersions.ROOM_V11_HYDRA_PLUS:
        return FrozenEventV4
    else:
        raise Exception("No event format %r" % (format_version,))
@@ -559,6 +636,23 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
    return _EventRelation(parent_id, rel_type, aggregation_key)


def is_creator(create: EventBase, user_id: str) -> bool:
    """
    Return true if the provided user ID is the room creator.

    This includes additional creators in MSC4289.
    """
    assert create.type == EventTypes.Create
    if create.sender == user_id:
        return True
    if create.room_version.msc4289_creator_power_enabled:
        additional_creators = set(
            create.content.get(EventContentFields.ADDITIONAL_CREATORS, [])
        )
        return user_id in additional_creators
    return False


@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedStateEvent:
    """
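`is_creator` is the single predicate the auth code leans on for MSC4289: the sender of the create event always counts, and in creator-power room versions the `additional_creators` array counts too. A hedged illustration with a hypothetical create event (values invented for the example):

    # Suppose, in an MSC4289-capable room version:
    #   create.sender == "@alice:example.org"
    #   create.content["additional_creators"] == ["@bob:example.org"]
    #
    # Then:
    #   is_creator(create, "@alice:example.org")   -> True   (the sender)
    #   is_creator(create, "@bob:example.org")     -> True   (an additional creator)
    #   is_creator(create, "@mallory:example.org") -> False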
@@ -82,7 +82,8 @@ class EventBuilder:

    room_version: RoomVersion

    room_id: str
    # MSC4291 makes the room ID == the create event ID. This means the create event has no room_id.
    room_id: Optional[str]
    type: str
    sender: str

@@ -142,7 +143,14 @@ class EventBuilder:
        Returns:
            The signed and hashed event.
        """
        # Create events always have empty auth_events.
        if self.type == EventTypes.Create and self.is_state() and self.state_key == "":
            auth_event_ids = []

        # Calculate auth_events for non-create events
        if auth_event_ids is None:
            # Every non-create event must have a room ID
            assert self.room_id is not None
            state_ids = await self._state.compute_state_after_events(
                self.room_id,
                prev_event_ids,
@@ -224,12 +232,31 @@ class EventBuilder:
            "auth_events": auth_events,
            "prev_events": prev_events,
            "type": self.type,
            "room_id": self.room_id,
            "sender": self.sender,
            "content": self.content,
            "unsigned": self.unsigned,
            "depth": depth,
        }
        if self.room_id is not None:
            event_dict["room_id"] = self.room_id

        if self.room_version.msc4291_room_ids_as_hashes:
            # In MSC4291: the create event has no room ID as the create event ID /is/ the room ID.
            if (
                self.type == EventTypes.Create
                and self.is_state()
                and self._state_key == ""
            ):
                assert self.room_id is None
            else:
                # All other events do not reference the create event in auth_events, as the room ID
                # /is/ the create event. However, the rest of the code (for consistency between room
                # versions) assume that the create event remains part of the auth events. c.f. event
                # class which automatically adds the create event when `.auth_event_ids()` is called
                assert self.room_id is not None
                create_event_id = "$" + self.room_id[1:]
                auth_event_ids.remove(create_event_id)
                event_dict["auth_events"] = auth_event_ids

        if self.is_state():
            event_dict["state_key"] = self._state_key
@@ -285,7 +312,7 @@ class EventBuilderFactory:
            room_version=room_version,
            type=key_values["type"],
            state_key=key_values.get("state_key"),
            room_id=key_values["room_id"],
            room_id=key_values.get("room_id"),
            sender=key_values["sender"],
            content=key_values.get("content", {}),
            unsigned=key_values.get("unsigned", {}),
@@ -306,6 +306,12 @@ class EventContext(UnpersistedEventContextBase):
    )


EventPersistencePair = Tuple[EventBase, EventContext]
"""
The combination of an event to be persisted and its context.
"""


@attr.s(slots=True, auto_attribs=True)
class UnpersistedEventContext(UnpersistedEventContextBase):
    """
@@ -363,7 +369,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
        room_id: str,
        last_known_state_group: int,
        datastore: "StateGroupDataStore",
    ) -> List[Tuple[EventBase, EventContext]]:
    ) -> List[EventPersistencePair]:
        """
        Takes a list of events and their associated unpersisted contexts and persists
        the unpersisted contexts, returning a list of events and persisted contexts.
@@ -26,8 +26,8 @@ from typing import (
    Any,
    Awaitable,
    Callable,
    Collection,
    Dict,
    Iterable,
    List,
    Mapping,
    Match,
@@ -49,6 +49,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.logging.opentracing import SynapseTags, set_tag, trace
from synapse.types import JsonDict, Requester

from . import EventBase, StrippedStateEvent, make_event_from_dict
@@ -176,9 +177,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
        if room_version.updated_redaction_rules:
            # MSC2176 rules state that create events cannot have their `content` redacted.
            new_content = event_dict["content"]
        elif not room_version.implicit_room_creator:
        if not room_version.implicit_room_creator:
            # Some room versions give meaning to `creator`
            add_fields("creator")
        if room_version.msc4291_room_ids_as_hashes:
            # room_id is not allowed on the create event as it's derived from the event ID
            allowed_keys.remove("room_id")

    elif event_type == EventTypes.JoinRules:
        add_fields("join_rule")
@@ -527,6 +531,10 @@ def serialize_event(
    if config.as_client_event:
        d = config.event_format(d)

    # Ensure the room_id field is set for create events in MSC4291 rooms
    if e.type == EventTypes.Create and e.room_version.msc4291_room_ids_as_hashes:
        d["room_id"] = e.room_id

    # If the event is a redaction, the field with the redacted event ID appears
    # in a different location depending on the room version. e.redacts handles
    # fetching from the proper location; copy it to the other location for forwards-
@@ -703,9 +711,10 @@ class EventClientSerializer:
                "m.relations", {}
            ).update(serialized_aggregations)

    @trace
    async def serialize_events(
        self,
        events: Iterable[Union[JsonDict, EventBase]],
        events: Collection[Union[JsonDict, EventBase]],
        time_now: int,
        *,
        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
@@ -724,6 +733,11 @@ class EventClientSerializer:
        Returns:
            The list of serialized events
        """
        set_tag(
            SynapseTags.FUNC_ARG_PREFIX + "events.length",
            str(len(events)),
        )

        return [
            await self.serialize_event(
                event,
@@ -872,6 +886,14 @@ def strip_event(event: EventBase) -> JsonDict:
    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
    properties present.
    """
    # MSC4311: Ensure the create event is available on invites and knocks.
    # TODO: Implement the rest of MSC4311
    if (
        event.room_version.msc4291_room_ids_as_hashes
        and event.type == EventTypes.Create
        and event.get_state_key() == ""
    ):
        return event.get_pdu_json()

    return {
        "type": event.type,
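The upshot for clients: in MSC4291 rooms the `m.room.create` entry in stripped state is the complete signed event, while every other entry keeps the usual four-field shape, because the recipient can only verify the room ID by recomputing the create event's reference hash from the full PDU. An illustration of the two shapes, with hypothetical values:

    # Ordinary stripped state event (all room versions): four fields only.
    {"type": "m.room.join_rules", "state_key": "", "sender": "@alice:example.org",
     "content": {"join_rule": "invite"}}

    # MSC4291 create event in stripped state: the full PDU, including the
    # fields (auth_events, hashes, signatures, ...) needed to recompute and
    # therefore verify the room ID.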
@@ -183,8 +183,18 @@ class EventValidator:
            fields an event would have
        """

        create_event_as_room_id = (
            event.room_version.msc4291_room_ids_as_hashes
            and event.type == EventTypes.Create
            and hasattr(event, "state_key")
            and event.state_key == ""
        )

        strings = ["room_id", "sender", "type"]

        if create_event_as_room_id:
            strings.remove("room_id")

        if hasattr(event, "state_key"):
            strings.append("state_key")

@@ -192,7 +202,14 @@ class EventValidator:
            if not isinstance(getattr(event, s), str):
                raise SynapseError(400, "Not '%s' a string type" % (s,))

        RoomID.from_string(event.room_id)
        if not create_event_as_room_id:
            assert event.room_id is not None
            RoomID.from_string(event.room_id)
            if event.room_version.msc4291_room_ids_as_hashes and not RoomID.is_valid(
                event.room_id
            ):
                raise SynapseError(400, f"Invalid room ID '{event.room_id}'")

        UserID.from_string(event.sender)

        if event.type == EventTypes.Message:
@@ -343,6 +343,21 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB
    if room_version.strict_canonicaljson:
        validate_canonicaljson(pdu_json)

    # enforce that MSC4291 auth events don't include the create event.
    # N.B. if they DO include a spurious create event, it'll fail auth checks elsewhere, so we don't
    # need to do expensive DB lookups to find which event ID is the create event here.
    if room_version.msc4291_room_ids_as_hashes:
        room_id = pdu_json.get("room_id")
        if room_id:
            create_event_id = "$" + room_id[1:]
            auth_events = pdu_json.get("auth_events")
            if auth_events:
                if create_event_id in auth_events:
                    raise SynapseError(
                        400,
                        "auth_events must not contain the create event",
                        Codes.BAD_JSON,
                    )
    event = make_event_from_dict(pdu_json, room_version)
    return event
@@ -59,7 +59,7 @@ from synapse.api.errors import (
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventPersistencePair
from synapse.federation.federation_base import (
    FederationBase,
    InvalidEventSignatureError,
@@ -914,7 +914,7 @@ class FederationServer(FederationBase):

    async def _on_send_membership_event(
        self, origin: str, content: JsonDict, membership_type: str, room_id: str
    ) -> Tuple[EventBase, EventContext]:
    ) -> EventPersistencePair:
        """Handle an on_send_{join,leave,knock} request

        Does some preliminary validation before passing the request on to the
@@ -37,6 +37,7 @@ Events are replicated via a separate events stream.
"""

import logging
from enum import Enum
from typing import (
    TYPE_CHECKING,
    Dict,
@@ -67,6 +68,25 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


class QueueNames(str, Enum):
    PRESENCE_MAP = "presence_map"
    KEYED_EDU = "keyed_edu"
    KEYED_EDU_CHANGED = "keyed_edu_changed"
    EDUS = "edus"
    POS_TIME = "pos_time"
    PRESENCE_DESTINATIONS = "presence_destinations"


queue_name_to_gauge_map: Dict[QueueNames, LaterGauge] = {}

for queue_name in QueueNames:
    queue_name_to_gauge_map[queue_name] = LaterGauge(
        name=f"synapse_federation_send_queue_{queue_name.value}_size",
        desc="",
        labelnames=[SERVER_NAME_LABEL],
    )


class FederationRemoteSendQueue(AbstractFederationSender):
    """A drop in replacement for FederationSender"""

@@ -111,23 +131,16 @@ class FederationRemoteSendQueue(AbstractFederationSender):
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name: str, queue: Sized) -> None:
            LaterGauge(
                name="synapse_federation_send_queue_%s_size" % (queue_name,),
                desc="",
                labelnames=[SERVER_NAME_LABEL],
                caller=lambda: {(self.server_name,): len(queue)},
        def register(queue_name: QueueNames, queue: Sized) -> None:
            queue_name_to_gauge_map[queue_name].register_hook(
                homeserver_instance_id=hs.get_instance_id(),
                hook=lambda: {(self.server_name,): len(queue)},
            )

        for queue_name in [
            "presence_map",
            "keyed_edu",
            "keyed_edu_changed",
            "edus",
            "pos_time",
            "presence_destinations",
        ]:
            register(queue_name, getattr(self, queue_name))
        for queue_name in QueueNames:
            queue = getattr(self, queue_name.value)
            assert isinstance(queue, Sized)
            register(queue_name, queue=queue)

        self.clock.looping_call(self._clear_queue, 30 * 1000)
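The refactor moves gauge construction to module scope, so each Prometheus collector exists exactly once per process, and turns per-homeserver wiring into a `register_hook` call keyed by instance ID. That avoids re-registering collectors when a second homeserver instance spins up in the same process (e.g. in tests). A sketch of the pattern with a hypothetical gauge, assuming the internal `LaterGauge.register_hook` signature shown in the diff:

    # Module scope: the collector is created once per process.
    my_queue_gauge = LaterGauge(
        name="synapse_example_queue_size",  # hypothetical metric name
        desc="",
        labelnames=[SERVER_NAME_LABEL],
    )

    class MyComponent:
        def __init__(self, hs):
            self.server_name = hs.hostname
            self.queue = []
            # Instance scope: each homeserver registers a callback that is
            # evaluated lazily when the metric is scraped.
            my_queue_gauge.register_hook(
                homeserver_instance_id=hs.get_instance_id(),
                hook=lambda: {(self.server_name,): len(self.queue)},
            )

This is also why `MockHomeserver` in the port-db script grew a `get_instance_id()` method earlier in this changeset: anything registering hooks now needs an instance ID.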
@@ -199,6 +199,24 @@ sent_pdus_destination_dist_total = Counter(
    labelnames=[SERVER_NAME_LABEL],
)

transaction_queue_pending_destinations_gauge = LaterGauge(
    name="synapse_federation_transaction_queue_pending_destinations",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

transaction_queue_pending_pdus_gauge = LaterGauge(
    name="synapse_federation_transaction_queue_pending_pdus",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

transaction_queue_pending_edus_gauge = LaterGauge(
    name="synapse_federation_transaction_queue_pending_edus",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

# Time (in s) to wait before trying to wake up destinations that have
# catch-up outstanding.
# Please note that rate limiting still applies, so while the loop is
@@ -398,11 +416,9 @@ class FederationSender(AbstractFederationSender):
        # map from destination to PerDestinationQueue
        self._per_destination_queues: Dict[str, PerDestinationQueue] = {}

        LaterGauge(
            name="synapse_federation_transaction_queue_pending_destinations",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {
        transaction_queue_pending_destinations_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (self.server_name,): sum(
                    1
                    for d in self._per_destination_queues.values()
@@ -410,22 +426,17 @@ class FederationSender(AbstractFederationSender):
                )
            },
        )

        LaterGauge(
            name="synapse_federation_transaction_queue_pending_pdus",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {
        transaction_queue_pending_pdus_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (self.server_name,): sum(
                    d.pending_pdu_count() for d in self._per_destination_queues.values()
                )
            },
        )
        LaterGauge(
            name="synapse_federation_transaction_queue_pending_edus",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {
        transaction_queue_pending_edus_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (self.server_name,): sum(
                    d.pending_edu_count() for d in self._per_destination_queues.values()
                )
@@ -215,9 +215,9 @@ class DelayedEventsHandler:
                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
            )

            event = await self._store.get_event(
                delta.event_id, check_room_id=delta.room_id
            )
            event = await self._store.get_event(delta.event_id, allow_none=True)
            if not event:
                continue
            sender = UserID.from_string(event.sender)

            next_send_ts = await self._store.cancel_delayed_state_events(
@@ -23,6 +23,8 @@ from typing import TYPE_CHECKING, List, Mapping, Optional, Union

from synapse import event_auth
from synapse.api.constants import (
    CREATOR_POWER_LEVEL,
    EventContentFields,
    EventTypes,
    JoinRules,
    Membership,
@@ -141,6 +143,8 @@ class EventAuthHandler:
        Raises:
            SynapseError if no appropriate user is found.
        """
        create_event_id = current_state_ids[(EventTypes.Create, "")]
        create_event = await self._store.get_event(create_event_id)
        power_level_event_id = current_state_ids.get((EventTypes.PowerLevels, ""))
        invite_level = 0
        users_default_level = 0
@@ -156,15 +160,28 @@ class EventAuthHandler:

        # Find the user with the highest power level (only interested in local
        # users).
        user_power_level = 0
        chosen_user = None
        local_users_in_room = await self._store.get_local_users_in_room(room_id)
        chosen_user = max(
            local_users_in_room,
            key=lambda user: users.get(user, users_default_level),
            default=None,
        )
        if create_event.room_version.msc4289_creator_power_enabled:
            creators = set(
                create_event.content.get(EventContentFields.ADDITIONAL_CREATORS, [])
            )
            creators.add(create_event.sender)
            local_creators = creators.intersection(set(local_users_in_room))
            if len(local_creators) > 0:
                chosen_user = local_creators.pop()  # random creator
                user_power_level = CREATOR_POWER_LEVEL
        else:
            chosen_user = max(
                local_users_in_room,
                key=lambda user: users.get(user, users_default_level),
                default=None,
            )
            # Return the chosen if they can issue invites.
            if chosen_user:
                user_power_level = users.get(chosen_user, users_default_level)

        # Return the chosen if they can issue invites.
        user_power_level = users.get(chosen_user, users_default_level)
        if chosen_user and user_power_level >= invite_level:
            logger.debug(
                "Found a user who can issue invites %s with power level %d >= invite level %d",
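When no local creator is present, the handler still falls back to picking the local member with the highest power level. A standalone sketch of that selection with hypothetical data, showing how `default=None` handles an empty room and how absent users fall back to `users_default_level`:

    users = {"@alice:example.org": 100, "@bob:example.org": 50}
    users_default_level = 0
    local_users_in_room = ["@bob:example.org", "@carol:example.org"]

    chosen_user = max(
        local_users_in_room,
        key=lambda user: users.get(user, users_default_level),
        default=None,
    )
    assert chosen_user == "@bob:example.org"  # 50 beats carol's default of 0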
@@ -66,7 +66,11 @@ from synapse.event_auth import (
    validate_event_for_room_version,
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.events.snapshot import (
    EventContext,
    EventPersistencePair,
    UnpersistedEventContextBase,
)
from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import (
@@ -341,7 +345,7 @@ class FederationEventHandler:

    async def on_send_membership_event(
        self, origin: str, event: EventBase
    ) -> Tuple[EventBase, EventContext]:
    ) -> EventPersistencePair:
        """
        We have received a join/leave/knock event for a room via send_join/leave/knock.

@@ -1712,7 +1716,7 @@ class FederationEventHandler:
        )
        auth_map.update(persisted_events)

        events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = []
        events_and_contexts_to_persist: List[EventPersistencePair] = []

        async def prep(event: EventBase) -> None:
            with nested_logging_context(suffix=event.event_id):
@@ -1728,6 +1732,9 @@ class FederationEventHandler:
                        event,
                        auth_event_id,
                    )
                    # Drop the event from the auth_map too, else we may incorrectly persist
                    # events which depend on this dropped event.
                    auth_map.pop(event.event_id, None)
                    return
                auth.append(ae)

@@ -2222,7 +2229,7 @@ class FederationEventHandler:
    async def persist_events_and_notify(
        self,
        room_id: str,
        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
        event_and_contexts: Sequence[EventPersistencePair],
        backfilled: bool = False,
    ) -> int:
        """Persists events and tells the notifier/pushers about them, if
@@ -57,6 +57,7 @@ from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import (
    EventContext,
    EventPersistencePair,
    UnpersistedEventContext,
    UnpersistedEventContextBase,
)
@@ -688,7 +689,10 @@ class EventCreationHandler:
                Codes.USER_ACCOUNT_SUSPENDED,
            )

        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
        is_create_event = (
            event_dict["type"] == EventTypes.Create and event_dict["state_key"] == ""
        )
        if is_create_event:
            room_version_id = event_dict["content"]["room_version"]
            maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not maybe_room_version_obj:
@@ -794,6 +798,7 @@ class EventCreationHandler:
        """
        # the only thing the user can do is join the server notices room.
        if builder.type == EventTypes.Member:
            assert builder.room_id is not None
            membership = builder.content.get("membership", None)
            if membership == Membership.JOIN:
                return await self.store.is_server_notice_room(builder.room_id)
@@ -1259,13 +1264,40 @@ class EventCreationHandler:
            for_verification=False,
        )

        if (
            builder.room_version.msc4291_room_ids_as_hashes
            and builder.type == EventTypes.Create
            and builder.is_state()
        ):
            if builder.room_id is not None:
                raise SynapseError(
                    400,
                    "Cannot resend m.room.create event",
                    Codes.INVALID_PARAM,
                )
        else:
            assert builder.room_id is not None

        if prev_event_ids is not None:
            assert len(prev_event_ids) <= 10, (
                "Attempting to create an event with %i prev_events"
                % (len(prev_event_ids),)
            )
        else:
            prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
            if builder.room_id:
                prev_event_ids = await self.store.get_prev_events_for_room(
                    builder.room_id
                )
            else:
                prev_event_ids = []  # can only happen for the create event in MSC4291 rooms

        if builder.type == EventTypes.Create and builder.is_state():
            if len(prev_event_ids) != 0:
                raise SynapseError(
                    400,
                    "Cannot resend m.room.create event",
                    Codes.INVALID_PARAM,
                )

        # We now ought to have some `prev_events` (unless it's a create event).
        #
@@ -1408,7 +1440,7 @@ class EventCreationHandler:
    async def handle_new_client_event(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
        ignore_shadow_ban: bool = False,
@@ -1529,7 +1561,7 @@ class EventCreationHandler:
        self,
        requester: Requester,
        room_id: str,
        prev_event_id: str,
        prev_event_id: Optional[str],
        event_dicts: Sequence[JsonDict],
        ratelimit: bool = True,
        ignore_shadow_ban: bool = False,
@@ -1560,6 +1592,12 @@ class EventCreationHandler:
            # Nothing to do.
            return

        if prev_event_id is None:
            # Pick the latest forward extremity as the previous event ID.
            prev_event_ids = await self.store.get_forward_extremities_for_room(room_id)
            prev_event_ids.sort(key=lambda x: x[2])  # Sort by depth.
            prev_event_id = prev_event_ids[-1][0]

        state_groups = await self._storage_controllers.state.get_state_group_for_events(
            [prev_event_id]
        )
@@ -1614,7 +1652,7 @@ class EventCreationHandler:
    async def _persist_events(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:
@@ -1700,7 +1738,7 @@ class EventCreationHandler:
            raise

    async def cache_joined_hosts_for_events(
        self, events_and_context: List[Tuple[EventBase, EventContext]]
        self, events_and_context: List[EventPersistencePair]
    ) -> None:
        """Precalculate the joined hosts at each of the given events, when using Redis, so that
        external federation senders don't have to recalculate it themselves.
@@ -1806,7 +1844,7 @@ class EventCreationHandler:
    async def persist_and_notify_client_events(
        self,
        requester: Requester,
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        ratelimit: bool = True,
        extra_users: Optional[List[UserID]] = None,
    ) -> EventBase:
@@ -2228,6 +2266,7 @@ class EventCreationHandler:
                    original_event.room_version, third_party_result
                )
                self.validator.validate_builder(builder)
                assert builder.room_id is not None
            except SynapseError as e:
                raise Exception(
                    "Third party rules module created an invalid event: " + e.msg,
@@ -173,6 +173,18 @@ state_transition_counter = Counter(
    labelnames=["locality", "from", "to", SERVER_NAME_LABEL],
)

presence_user_to_current_state_size_gauge = LaterGauge(
    name="synapse_handlers_presence_user_to_current_state_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

presence_wheel_timer_size_gauge = LaterGauge(
    name="synapse_handlers_presence_wheel_timer_size",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
@@ -779,11 +791,9 @@ class PresenceHandler(BasePresenceHandler):
            EduTypes.PRESENCE, self.incoming_presence
        )

        LaterGauge(
            name="synapse_handlers_presence_user_to_current_state_size",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self.user_to_current_state)},
        presence_user_to_current_state_size_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.user_to_current_state)},
        )

        # The per-device presence state, maps user to devices to per-device presence state.
@@ -882,11 +892,9 @@ class PresenceHandler(BasePresenceHandler):
            60 * 1000,
        )

        LaterGauge(
            name="synapse_handlers_presence_wheel_timer_size",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self.wheel_timer)},
        presence_wheel_timer_size_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.wheel_timer)},
        )

        # Used to handle sending of presence to newly joined users/servers
@@ -82,6 +82,7 @@ from synapse.types import (
Requester,
RoomAlias,
RoomID,
RoomIdWithDomain,
RoomStreamToken,
StateMap,
StrCollection,
@@ -93,6 +94,7 @@ from synapse.types import (
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import parse_and_validate_server_name
@@ -195,7 +197,13 @@ class RoomCreationHandler:
)

async def upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
self,
requester: Requester,
old_room_id: str,
new_version: RoomVersion,
additional_creators: Optional[List[str]],
auto_member: bool = False,
ratelimit: bool = True,
) -> str:
"""Replace a room with a new room with a different version

@@ -203,6 +211,9 @@ class RoomCreationHandler:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
new_version: the new room version to use
additional_creators: additional room creators, for MSC4289.
auto_member: Whether to automatically join local users to the new
room and send out invites to remote users.

Returns:
the new room id
@@ -210,11 +221,12 @@ class RoomCreationHandler:
Raises:
ShadowBanError if the requester is shadow-banned.
"""
await self.creation_ratelimiter.ratelimit(requester, update=False)
if ratelimit:
await self.creation_ratelimiter.ratelimit(requester, update=False)

# then apply the ratelimits
await self.common_request_ratelimiter.ratelimit(requester)
await self.creation_ratelimiter.ratelimit(requester)
# then apply the ratelimits
await self.common_request_ratelimiter.ratelimit(requester)
await self.creation_ratelimiter.ratelimit(requester)

user_id = requester.user.to_string()

@@ -235,8 +247,29 @@ class RoomCreationHandler:
old_room = await self.store.get_room(old_room_id)
if old_room is None:
raise NotFoundError("Unknown room id %s" % (old_room_id,))
old_room_is_public, _ = old_room

new_room_id = self._generate_room_id()
creation_event_with_context = None
if new_version.msc4291_room_ids_as_hashes:
old_room_create_event = await self.store.get_create_event_for_room(
old_room_id
)
creation_content = self._calculate_upgraded_room_creation_content(
old_room_create_event,
tombstone_event_id=None,
new_room_version=new_version,
additional_creators=additional_creators,
)
creation_event_with_context = await self._generate_create_event_for_room_id(
requester,
creation_content,
old_room_is_public,
new_version,
)
(create_event, _) = creation_event_with_context
new_room_id = create_event.room_id
else:
new_room_id = self._generate_room_id()

# Try several times, it could fail with PartialStateConflictError
# in _upgrade_room, cf comment in except block.
@@ -285,6 +318,9 @@ class RoomCreationHandler:
new_version,
tombstone_event,
tombstone_context,
additional_creators,
creation_event_with_context,
auto_member=auto_member,
)

return ret
@@ -308,6 +344,11 @@ class RoomCreationHandler:
new_version: RoomVersion,
tombstone_event: EventBase,
tombstone_context: synapse.events.snapshot.EventContext,
additional_creators: Optional[List[str]],
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
auto_member: bool = False,
) -> str:
"""
Args:
@@ -319,6 +360,10 @@ class RoomCreationHandler:
new_version: the version to upgrade the room to
tombstone_event: the tombstone event to send to the old room
tombstone_context: the context for the tombstone event
additional_creators: additional room creators, for MSC4289.
creation_event_with_context: The new room's create event, for room IDs as create event IDs.
auto_member: Whether to automatically join local users to the new
room and send out invites to remote users.

Raises:
ShadowBanError if the requester is shadow-banned.
@@ -328,14 +373,16 @@ class RoomCreationHandler:

logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)

# create the new room. may raise a `StoreError` in the exceedingly unlikely
# event of a room ID collision.
await self.store.store_room(
room_id=new_room_id,
room_creator_user_id=user_id,
is_public=old_room[0],
room_version=new_version,
)
# We've already stored the room if we have the create event
if not creation_event_with_context:
# create the new room. may raise a `StoreError` in the exceedingly unlikely
# event of a room ID collision.
await self.store.store_room(
room_id=new_room_id,
room_creator_user_id=user_id,
is_public=old_room[0],
room_version=new_version,
)

await self.clone_existing_room(
requester,
@@ -343,6 +390,9 @@ class RoomCreationHandler:
new_room_id=new_room_id,
new_room_version=new_version,
tombstone_event_id=tombstone_event.event_id,
additional_creators=additional_creators,
creation_event_with_context=creation_event_with_context,
auto_member=auto_member,
)

# now send the tombstone
@@ -376,6 +426,7 @@ class RoomCreationHandler:
old_room_id,
new_room_id,
old_room_state,
additional_creators,
)

return new_room_id
@@ -386,6 +437,7 @@ class RoomCreationHandler:
old_room_id: str,
new_room_id: str,
old_room_state: StateMap[str],
additional_creators: Optional[List[str]],
) -> None:
"""Send updated power levels in both rooms after an upgrade

@@ -394,7 +446,7 @@ class RoomCreationHandler:
old_room_id: the id of the room to be replaced
new_room_id: the id of the replacement room
old_room_state: the state map for the old room

additional_creators: Additional creators in the new room.
Raises:
ShadowBanError if the requester is shadow-banned.
"""
@@ -450,6 +502,14 @@ class RoomCreationHandler:
except AuthError as e:
logger.warning("Unable to update PLs in old room: %s", e)

new_room_version = await self.store.get_room_version(new_room_id)
if new_room_version.msc4289_creator_power_enabled:
self._remove_creators_from_pl_users_map(
old_room_pl_state.content.get("users", {}),
requester.user.to_string(),
additional_creators,
)

await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
@@ -464,6 +524,36 @@ class RoomCreationHandler:
ratelimit=False,
)

def _calculate_upgraded_room_creation_content(
self,
old_room_create_event: EventBase,
tombstone_event_id: Optional[str],
new_room_version: RoomVersion,
additional_creators: Optional[List[str]],
) -> JsonDict:
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {
"room_id": old_room_create_event.room_id,
},
}
if tombstone_event_id is not None:
creation_content["predecessor"]["event_id"] = tombstone_event_id
if (
additional_creators is not None
and new_room_version.msc4289_creator_power_enabled
):
creation_content["additional_creators"] = additional_creators
# Check if old room was non-federatable
if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
creation_content[EventContentFields.FEDERATE] = False
# Copy the room type as per MSC3818.
room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
if room_type is not None:
creation_content[EventContentFields.ROOM_TYPE] = room_type
return creation_content

async def clone_existing_room(
self,
requester: Requester,
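For orientation, assuming an old room that was non-federatable, typed as a space, and upgraded with one additional creator under MSC4289, the helper above would produce creation content along these lines (a hand-worked example; the room version identifier is an assumption, not taken from the diff):

# Hand-worked example output of _calculate_upgraded_room_creation_content,
# assuming: non-federatable old room, room type m.space, one additional
# creator, tombstone_event_id=None (as on the upgrade path above).
creation_content = {
    "room_version": "12",  # assumed identifier for an MSC4289/MSC4291-capable version
    "predecessor": {
        "room_id": "!oldroom:example.com",
        # "event_id" would only appear here if a tombstone event id was passed in.
    },
    "additional_creators": ["@alice:example.com"],
    "m.federate": False,  # copied because the old room was non-federatable
    "type": "m.space",  # room type copied per MSC3818
}
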
@@ -471,6 +561,11 @@ class RoomCreationHandler:
new_room_id: str,
new_room_version: RoomVersion,
tombstone_event_id: str,
additional_creators: Optional[List[str]],
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
auto_member: bool = False,
) -> None:
"""Populate a new room based on an old room

@@ -481,24 +576,27 @@ class RoomCreationHandler:
created with _generate_room_id())
new_room_version: the new room version to use
tombstone_event_id: the ID of the tombstone event in the old room.
additional_creators: additional room creators, for MSC4289.
creation_event_with_context: The create event of the new room, if the new room supports
room ID as create event ID hash.
auto_member: Whether to automatically join local users to the new
room and send out invites to remote users.
"""
user_id = requester.user.to_string()

creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
}

# Check if old room was non-federatable

# Get old room's create event
old_room_create_event = await self.store.get_create_event_for_room(old_room_id)

# Check if the create event specified a non-federatable room
if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
creation_content[EventContentFields.FEDERATE] = False

if creation_event_with_context:
create_event, _ = creation_event_with_context
creation_content = create_event.content
else:
creation_content = self._calculate_upgraded_room_creation_content(
old_room_create_event,
tombstone_event_id,
new_room_version,
additional_creators=additional_creators,
)
initial_state = {}

# Replicate relevant room events
@@ -514,11 +612,8 @@ class RoomCreationHandler:
(EventTypes.PowerLevels, ""),
]

# Copy the room type as per MSC3818.
room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
if room_type is not None:
creation_content[EventContentFields.ROOM_TYPE] = room_type

# If the old room was a space, copy over the rooms in the space.
if room_type == RoomTypes.SPACE:
types_to_copy.append((EventTypes.SpaceChild, None))
@@ -590,6 +685,14 @@ class RoomCreationHandler:
if current_power_level_int < needed_power_level:
user_power_levels[user_id] = needed_power_level

if new_room_version.msc4289_creator_power_enabled:
# the creator(s) cannot be in the users map
self._remove_creators_from_pl_users_map(
user_power_levels,
user_id,
additional_creators,
)

# We construct what the body of a call to /createRoom would look like for passing
# to the spam checker. We don't include a preset here, as we expect the
# initial state to contain everything we need.
@@ -618,6 +721,7 @@ class RoomCreationHandler:
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
creation_event_with_context=creation_event_with_context,
)

# Transfer membership events
@@ -631,7 +735,7 @@ class RoomCreationHandler:
# `update_membership`, however in this case its fine to bypass as
# these bans don't need any special treatment, i.e. the sender is in
# the room and they don't need any extra signatures, etc.
for batched_events in batch_iter(ban_events, 1000):
for batched_ban_events in batch_iter(ban_events, 1000):
await self.event_creation_handler.create_and_send_new_client_events(
requester=requester,
room_id=new_room_id,
@@ -644,13 +748,201 @@ class RoomCreationHandler:
"sender": requester.user.to_string(),
"content": ban_event.content,
}
for ban_event in batched_events
for ban_event in batched_ban_events
],
ratelimit=False,
ratelimit=False, # We ratelimit the entire upgrade, not individual events.
)

# XXX invites/joins
# XXX 3pid invites
if auto_member:
logger.info("Joining local users to %s", new_room_id)

# 1. Copy over all joins for local
joined_profiles = await self.store.get_users_in_room_with_profiles(
old_room_id
)

local_user_ids = [
user_id for user_id in joined_profiles if self.hs.is_mine_id(user_id)
]

logger.info("Local user IDs %s", local_user_ids)

for batched_local_user_ids in batch_iter(local_user_ids, 1000):
invites_to_send = []

# For each local user we create an invite event (from the
# upgrading user) plus a join event.
for local_user_id in batched_local_user_ids:
if local_user_id == user_id:
# Ignore the upgrading user, as they are already in the
# new room.
continue

invites_to_send.append(
{
"type": EventTypes.Member,
"state_key": local_user_id,
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": {
"membership": Membership.INVITE,
},
}
)

# If the user has profile information in the previous join,
# add it to the content.
#
# We could instead copy over the contents from the old join
# event, however a) that would require us to fetch all the
# old join events (which is slow), and b) generally the join
# events have no extra information in them. (We also believe
# that most clients don't copy this information over either,
# but we could be wrong.)
content_profile = {}
user_profile = joined_profiles[local_user_id]
if user_profile.display_name:
content_profile["displayname"] = user_profile.display_name
if user_profile.avatar_url:
content_profile["avatar_url"] = user_profile.avatar_url

invites_to_send.append(
{
"type": EventTypes.Member,
"state_key": local_user_id,
"room_id": new_room_id,
"sender": local_user_id,
"content": {
"membership": Membership.JOIN,
**content_profile,
},
}
)

await self.event_creation_handler.create_and_send_new_client_events(
requester=requester,
room_id=new_room_id,
prev_event_id=None,
event_dicts=invites_to_send,
ratelimit=False, # We ratelimit the entire upgrade, not individual events.
)

# Invite other users if the room is not public. If the room *is*
# public then users can simply directly join, and inviting them as
# well may lead to confusion.

join_rule_content = initial_state.get((EventTypes.JoinRules, ""), None)
is_public = False
if join_rule_content:
is_public = join_rule_content["join_rule"] == JoinRules.PUBLIC

if not is_public:
# Copy invites
# TODO: Copy over 3pid invites as well.
invited_users = await self.store.get_invited_users_in_room(
room_id=old_room_id
)

# For local users we can just batch send the invites.
local_invited_users = [
user_id for user_id in invited_users if self.hs.is_mine_id(user_id)
]

logger.info(
"Joining local user IDs %s to new room %s",
local_invited_users,
new_room_id,
)

for batched_local_invited_users in batch_iter(
local_invited_users, 1000
):
invites_to_send = []
leaves_to_send = []

# For each local user we create an invite event (from the
# upgrading user), and reject the invite event in the old
# room.
#
# This ensures that the user ends up with a single invite to
# the new room (rather than multiple invites which may be
# noisy and confusing).
for local_user_id in batched_local_invited_users:
leaves_to_send.append(
{
"type": EventTypes.Member,
"state_key": local_user_id,
"room_id": old_room_id,
"sender": local_user_id,
"content": {
"membership": Membership.LEAVE,
},
}
)
invites_to_send.append(
{
"type": EventTypes.Member,
"state_key": local_user_id,
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": {
"membership": Membership.INVITE,
},
}
)

await self.event_creation_handler.create_and_send_new_client_events(
requester=requester,
room_id=old_room_id,
prev_event_id=None,
event_dicts=leaves_to_send,
ratelimit=False, # We ratelimit the entire upgrade, not individual events.
)
await self.event_creation_handler.create_and_send_new_client_events(
requester=requester,
room_id=new_room_id,
prev_event_id=None,
event_dicts=invites_to_send,
ratelimit=False,
)

# For remote users we send invites one by one, as we need to
# send each one to the remote server.
#
# We also invite joined remote users who were in the old room.
remote_user_ids = [
user_id
for user_id in itertools.chain(invited_users, joined_profiles)
if not self.hs.is_mine_id(user_id)
]

logger.debug("Inviting remote user IDs %s", remote_user_ids)

async def remote_invite(remote_user: str) -> None:
try:
await self.room_member_handler.update_membership(
requester,
UserID.from_string(remote_user),
new_room_id,
Membership.INVITE,
ratelimit=False, # We ratelimit the entire upgrade, not individual events.
)
except SynapseError as e:
# If we fail to invite a remote user, we log it but continue
# on with the upgrade.
logger.warning(
"Failed to invite remote user %s to new room %s: %s",
remote_user,
new_room_id,
e,
)

# We do this concurrently, as it can take a while to invite
await concurrently_execute(
remote_invite,
remote_user_ids,
10,
)

async def _move_aliases_to_new_room(
self,
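The remote-invite fan-out above caps concurrency at ten via concurrently_execute, while remote_invite catches SynapseError so a single failed federation invite cannot abort the upgrade. A rough asyncio rendering of that shape (a sketch only; Synapse's real helper lives in synapse.util.async_helpers and runs on Twisted):

import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute_sketch(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    # Bounded fan-out: a semaphore caps how many calls are in flight at once.
    sem = asyncio.Semaphore(limit)

    async def run_one(arg: T) -> None:
        async with sem:
            await func(arg)

    await asyncio.gather(*(run_one(a) for a in args))


async def main() -> None:
    async def invite(user: str) -> None:
        # In the diff this is remote_invite(), which catches SynapseError so
        # one failed federation invite does not abort the whole upgrade.
        await asyncio.sleep(0)
        print("invited", user)

    await concurrently_execute_sketch(invite, ["@a:remote.example", "@b:remote.example"], 10)


asyncio.run(main())
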
@@ -904,6 +1196,7 @@ class RoomCreationHandler:
power_level_content_override = config.get("power_level_content_override")
if (
power_level_content_override
and not room_version.msc4289_creator_power_enabled # this validation doesn't apply in MSC4289 rooms
and "users" in power_level_content_override
and user_id not in power_level_content_override["users"]
):
@@ -933,11 +1226,41 @@ class RoomCreationHandler:
additional_fields=spam_check[1],
)

room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
creation_content = config.get("creation_content", {})
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version.identifier

# trusted private chats have the invited users marked as additional creators
if (
room_version.msc4289_creator_power_enabled
and config.get("preset", None) == RoomCreationPreset.TRUSTED_PRIVATE_CHAT
and len(config.get("invite", [])) > 0
):
# the other user(s) are additional creators
invitees = config.get("invite", [])
# we don't want to replace any additional_creators additionally specified, and we want
# to remove duplicates.
creation_content[EventContentFields.ADDITIONAL_CREATORS] = list(
set(creation_content.get(EventContentFields.ADDITIONAL_CREATORS, []))
| set(invitees)
)

creation_event_with_context = None
if room_version.msc4291_room_ids_as_hashes:
creation_event_with_context = await self._generate_create_event_for_room_id(
requester,
creation_content,
is_public,
room_version,
)
(create_event, _) = creation_event_with_context
room_id = create_event.room_id
else:
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)

# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = await (
@@ -974,11 +1297,6 @@ class RoomCreationHandler:
for val in raw_initial_state:
initial_state[(val["type"], val.get("state_key", ""))] = val["content"]

creation_content = config.get("creation_content", {})

# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version.identifier

(
last_stream_id,
last_sent_event_id,
@@ -995,6 +1313,7 @@ class RoomCreationHandler:
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
ignore_forced_encryption=ignore_forced_encryption,
creation_event_with_context=creation_event_with_context,
)

# we avoid dropping the lock between invites, as otherwise joins can
@@ -1060,6 +1379,38 @@ class RoomCreationHandler:

return room_id, room_alias, last_stream_id

async def _generate_create_event_for_room_id(
self,
creator: Requester,
creation_content: JsonDict,
is_public: bool,
room_version: RoomVersion,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
(
creation_event,
new_unpersisted_context,
) = await self.event_creation_handler.create_event(
creator,
{
"content": creation_content,
"sender": creator.user.to_string(),
"type": EventTypes.Create,
"state_key": "",
},
prev_event_ids=[],
depth=1,
state_map={},
for_batch=False,
)
await self.store.store_room(
room_id=creation_event.room_id,
room_creator_user_id=creator.user.to_string(),
is_public=is_public,
room_version=room_version,
)
creation_context = await new_unpersisted_context.persist(creation_event)
return (creation_event, creation_context)

async def _send_events_for_new_room(
self,
creator: Requester,
@@ -1073,6 +1424,9 @@ class RoomCreationHandler:
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
ignore_forced_encryption: bool = False,
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
) -> Tuple[int, str, int]:
"""Sends the initial events into a new room. Sends the room creation, membership,
and power level events into the room sequentially, then creates and batches up the
@@ -1109,7 +1463,10 @@ class RoomCreationHandler:
user in this room.
ignore_forced_encryption:
Ignore encryption forced by `encryption_enabled_by_default_for_room_type` setting.

creation_event_with_context:
Set in MSC4291 rooms where the create event determines the room ID. If provided,
does not create an additional create event but instead appends the remaining new
events onto the provided create event.
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
@@ -1174,13 +1531,26 @@ class RoomCreationHandler:

preset_config, config = self._room_preset_config(room_config)

# MSC2175 removes the creator field from the create event.
if not room_version.implicit_room_creator:
creation_content["creator"] = creator_id
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
creation_context = await unpersisted_creation_context.persist(creation_event)
if creation_event_with_context is None:
# MSC2175 removes the creator field from the create event.
if not room_version.implicit_room_creator:
creation_content["creator"] = creator_id
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
creation_context = await unpersisted_creation_context.persist(
creation_event
)
else:
(creation_event, creation_context) = creation_event_with_context
# we had to do the above already in order to have a room ID, so just updates local vars
# and continue.
depth = 2
prev_event = [creation_event.event_id]
state_map[(creation_event.type, creation_event.state_key)] = (
creation_event.event_id
)

logger.debug("Sending %s in new room", EventTypes.Member)
ev = await self.event_creation_handler.handle_new_client_event(
requester=creator,
@@ -1229,7 +1599,9 @@ class RoomCreationHandler:
# Please update the docs for `default_power_level_content_override` when
# updating the `events` dict below
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users": {creator_id: 100}
if not room_version.msc4289_creator_power_enabled
else {},
"users_default": 0,
"events": {
EventTypes.Name: 50,
@@ -1237,7 +1609,9 @@ class RoomCreationHandler:
EventTypes.RoomHistoryVisibility: 100,
EventTypes.CanonicalAlias: 50,
EventTypes.RoomAvatar: 50,
EventTypes.Tombstone: 100,
EventTypes.Tombstone: 150
if room_version.msc4289_creator_power_enabled
else 100,
EventTypes.ServerACL: 100,
EventTypes.RoomEncryption: 100,
},
@@ -1250,7 +1624,13 @@ class RoomCreationHandler:
"historical": 100,
}

if config["original_invitees_have_ops"]:
# original_invitees_have_ops is set on preset:trusted_private_chat which will already
# have set these users as additional_creators, hence don't set the PL for creators as
# that is invalid.
if (
config["original_invitees_have_ops"]
and not room_version.msc4289_creator_power_enabled
):
for invitee in invite_list:
power_level_content["users"][invitee] = 100

@@ -1423,6 +1803,19 @@ class RoomCreationHandler:
)
return preset_name, preset_config

def _remove_creators_from_pl_users_map(
self,
users_map: Dict[str, int],
creator: str,
additional_creators: Optional[List[str]],
) -> None:
creators = [creator]
if additional_creators:
creators.extend(additional_creators)
for creator in creators:
# the creator(s) cannot be in the users map
users_map.pop(creator, None)

def _generate_room_id(self) -> str:
"""Generates a random room ID.

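The new _remove_creators_from_pl_users_map helper simply drops every creator from the power-level users map, since MSC4289 rooms treat creators as implicitly privileged and listing them there is invalid. Worked through by hand:

users_map = {"@alice:example.com": 100, "@bob:example.com": 50}

# creator="@alice:example.com", additional_creators=["@carol:example.com"]
for creator in ["@alice:example.com", "@carol:example.com"]:
    users_map.pop(creator, None)  # creators absent from the map are ignored

print(users_map)  # {'@bob:example.com': 50}
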
@@ -1440,7 +1833,7 @@ class RoomCreationHandler:
A random room ID of the form "!opaque_id:domain".
"""
random_string = stringutils.random_string(18)
return RoomID(random_string, self.hs.hostname).to_string()
return RoomIdWithDomain(random_string, self.hs.hostname).to_string()

async def _generate_and_create_room_id(
self,

@@ -42,7 +42,7 @@ from synapse.api.errors import (
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events import EventBase, is_creator
from synapse.events.snapshot import EventContext
from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
@@ -1160,9 +1160,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))

# we used to add the domain of the room ID to remote_room_hosts.
# This is not safe in MSC4291 rooms which do not have a domain.
content["membership"] = Membership.KNOCK

try:
@@ -2324,6 +2323,7 @@ def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[s

# Check which members are able to invite by ensuring they're joined and have
# the necessary power level.
create_event = auth_events[(EventTypes.Create, "")]
for (event_type, state_key), event in auth_events.items():
if event_type != EventTypes.Member:
continue
@@ -2331,8 +2331,12 @@ def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[s
if event.membership != Membership.JOIN:
continue

if create_event.room_version.msc4289_creator_power_enabled and is_creator(
create_event, state_key
):
result.append(state_key)
# Check if the user has a custom power level.
if users.get(state_key, users_default_level) >= invite_level:
elif users.get(state_key, users_default_level) >= invite_level:
result.append(state_key)

return result

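The get_users_which_can_issue_invite change above short-circuits for room creators: in an MSC4289 room a joined creator may always invite, and only everyone else falls through to the power-level lookup. Restated as a standalone predicate (an illustration, not the full Synapse function):

from typing import Dict


def can_issue_invite_sketch(
    state_key: str,
    msc4289_room: bool,
    user_is_creator: bool,
    users: Dict[str, int],
    users_default_level: int,
    invite_level: int,
) -> bool:
    # Creators in MSC4289 rooms bypass the power-level lookup entirely.
    if msc4289_room and user_is_creator:
        return True
    # Everyone else needs at least the invite power level.
    return users.get(state_key, users_default_level) >= invite_level


assert can_issue_invite_sketch("@creator:hs", True, True, {}, 0, 50)
assert not can_issue_invite_sketch("@user:hs", True, False, {}, 0, 50)
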
@@ -116,7 +116,7 @@ class SlidingSyncHandler:
sync_config: SlidingSyncConfig,
from_token: Optional[SlidingSyncStreamToken] = None,
timeout_ms: int = 0,
) -> SlidingSyncResult:
) -> Tuple[SlidingSyncResult, bool]:
"""
Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
@@ -128,9 +128,16 @@ class SlidingSyncHandler:
from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request.
timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
we will immediately but there might not be any new data so we just return an
empty response.
we will respond immediately but there might not be any new data so we just
return an empty response.

Returns:
A tuple containing the `SlidingSyncResult` and whether we waited for new
activity before responding. Knowing whether we waited is useful in traces
to filter out long-running requests where we were just waiting.
"""
did_wait = False

# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
# auth_blocking will occur)
@@ -149,7 +156,7 @@ class SlidingSyncHandler:
logger.warning(
"Timed out waiting for worker to catch up. Returning empty response"
)
return SlidingSyncResult.empty(from_token)
return SlidingSyncResult.empty(from_token), did_wait

# If we've spent significant time waiting to catch up, take it off
# the timeout.
@@ -185,8 +192,9 @@ class SlidingSyncHandler:
current_sync_callback,
from_token=from_token.stream_token,
)
did_wait = True

return result
return result, did_wait

@trace
async def current_sync_for_user(

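Since wait_for_sync_for_user now returns (result, did_wait), callers unpack the flag, e.g. to tag traces so long-poll waits can be filtered out. A toy model of the new contract (names here are stand-ins, not the actual servlet code):

import asyncio
from typing import Tuple


async def wait_for_sync_sketch(timeout_ms: int) -> Tuple[str, bool]:
    # Mirrors the new contract: return the result plus whether we blocked
    # waiting for activity before responding.
    did_wait = False
    if timeout_ms > 0:
        await asyncio.sleep(timeout_ms / 1000)  # stand-in for the notifier wait
        did_wait = True
    return "sync-result", did_wait


async def main() -> None:
    result, did_wait = await wait_for_sync_sketch(timeout_ms=0)
    print(result, did_wait)  # sync-result False


asyncio.run(main())
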
@@ -164,11 +164,13 @@ def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]:
return counts


LaterGauge(
in_flight_requests = LaterGauge(
name="synapse_http_server_in_flight_requests_count",
desc="",
labelnames=["method", "servlet", SERVER_NAME_LABEL],
caller=_get_in_flight_counts,
)
in_flight_requests.register_hook(
homeserver_instance_id=None, hook=_get_in_flight_counts
)

@@ -702,6 +702,10 @@ class _ByteProducer:
self._request: Optional[Request] = request
self._iterator = iterator
self._paused = False
self.tracing_scope = start_active_span(
"write_bytes_to_request",
)
self.tracing_scope.__enter__()

try:
self._request.registerProducer(self, True)
@@ -712,8 +716,8 @@ class _ByteProducer:
logger.info("Connection disconnected before response was written: %r", e)

# We drop our references to data we'll not use.
self._request = None
self._iterator = iter(())
self.tracing_scope.__exit__(type(e), None, e.__traceback__)
else:
# Start producing if `registerProducer` was successful
self.resumeProducing()
@@ -727,6 +731,9 @@ class _ByteProducer:
self._request.write(b"".join(data))

def pauseProducing(self) -> None:
opentracing_span = active_span()
if opentracing_span is not None:
opentracing_span.log_kv({"event": "producer_paused"})
self._paused = True

def resumeProducing(self) -> None:
@@ -737,6 +744,10 @@ class _ByteProducer:

self._paused = False

opentracing_span = active_span()
if opentracing_span is not None:
opentracing_span.log_kv({"event": "producer_resumed"})

# Write until there's backpressure telling us to stop.
while not self._paused:
# Get the next chunk and write it to the request.
@@ -771,6 +782,7 @@ class _ByteProducer:
def stopProducing(self) -> None:
# Clear a circular reference.
self._request = None
self.tracing_scope.__exit__(None, None, None)


def _encode_json_bytes(json_object: object) -> bytes:
@@ -913,8 +925,9 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
# once (via `Request.write`) is that doing so starts the timeout for the
# next request to be received: so if it takes longer than 60s to stream back
# the response to the client, the client never gets it.
# c.f https://github.com/twisted/twisted/issues/12498
#
# The correct solution is to use a Producer; then the timeout is only
# One workaround is to use a `Producer`; then the timeout is only
# started once all of the content is sent over the TCP connection.

# To make sure we don't write all of the bytes at once we split it up into

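The _ByteProducer changes above wrap the producer's lifetime in a tracing scope and log pause/resume events; the underlying object is still the standard Twisted push-producer shape the comment describes. A bare-bones version of that shape (illustrative; the real class also slices JSON into chunks and guards against disconnects):

from typing import Iterator, List


class MiniByteProducer:
    """Push-producer skeleton: the transport calls pauseProducing() when its
    buffers fill and resumeProducing() when they drain, so we only write
    while not paused -- the backpressure loop from the diff."""

    def __init__(self, request, iterator: Iterator[bytes]) -> None:
        self._request = request
        self._iterator = iterator
        self._paused = False
        # streaming=True registers this object as a push producer.
        request.registerProducer(self, True)
        self.resumeProducing()

    def pauseProducing(self) -> None:
        self._paused = True

    def resumeProducing(self) -> None:
        self._paused = False
        while not self._paused:
            chunk = next(self._iterator, None)
            if chunk is None:
                self._request.unregisterProducer()
                self._request.finish()
                return
            self._request.write(chunk)

    def stopProducing(self) -> None:
        self._request = None  # clear the circular reference, as in the diff


class FakeRequest:
    """Tiny stand-in so the sketch runs without a Twisted reactor."""

    def __init__(self) -> None:
        self.data: List[bytes] = []

    def registerProducer(self, producer, streaming: bool) -> None:
        pass

    def unregisterProducer(self) -> None:
        pass

    def write(self, data: bytes) -> None:
        self.data.append(data)

    def finish(self) -> None:
        print(b"".join(self.data))


MiniByteProducer(FakeRequest(), iter([b"hello ", b"world"]))  # b'hello world'
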
@@ -34,6 +34,7 @@ import logging
import threading
import typing
import warnings
from contextvars import ContextVar
from types import TracebackType
from typing import (
TYPE_CHECKING,
@@ -56,7 +57,6 @@ from twisted.internet import defer, threads
from twisted.python.threadpool import ThreadPool

if TYPE_CHECKING:
from synapse.logging.scopecontextmanager import _LogContextScope
from synapse.types import ISynapseReactor

logger = logging.getLogger(__name__)
@@ -230,14 +230,13 @@ LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]
class _Sentinel:
"""Sentinel to represent the root context"""

__slots__ = ["previous_context", "finished", "request", "scope", "tag"]
__slots__ = ["previous_context", "finished", "request", "tag"]

def __init__(self) -> None:
# Minimal set for compatibility with LoggingContext
self.previous_context = None
self.finished = False
self.request = None
self.scope = None
self.tag = None

def __str__(self) -> str:
@@ -290,7 +289,6 @@ class LoggingContext:
"finished",
"request",
"tag",
"scope",
]

def __init__(
@@ -311,7 +309,6 @@ class LoggingContext:
self.main_thread = get_thread_id()
self.request = None
self.tag = ""
self.scope: Optional["_LogContextScope"] = None

# keep track of whether we have hit the __exit__ block for this context
# (suggesting that the the thing that created the context thinks it should
@@ -324,9 +321,6 @@ class LoggingContext:
# we track the current request_id
self.request = self.parent_context.request

# we also track the current scope:
self.scope = self.parent_context.scope

if request is not None:
# the request param overrides the request from the parent context
self.request = request
@@ -660,13 +654,12 @@ class PreserveLoggingContext:
)


_thread_local = threading.local()
_thread_local.current_context = SENTINEL_CONTEXT
_current_context: ContextVar[LoggingContextOrSentinel] = ContextVar("current_context")


def current_context() -> LoggingContextOrSentinel:
"""Get the current logging context from thread local storage"""
return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)
return _current_context.get(SENTINEL_CONTEXT)


def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
@@ -687,7 +680,7 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe
if current is not context:
rusage = get_thread_resource_usage()
current.stop(rusage)
_thread_local.current_context = context
_current_context.set(context)
context.start(rusage)

return current
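The storage swap above is from threading.local to contextvars.ContextVar, which is also what lets opentracing's ContextVarsScopeManager (adopted later in this diff) track the same notion of "current context" without Synapse's custom scope plumbing. The ContextVar semantics being relied on, in isolation:

from contextvars import ContextVar, copy_context

current: ContextVar[str] = ContextVar("current_context")
SENTINEL = "sentinel"

# .get(default) replaces the old getattr(_thread_local, ..., SENTINEL_CONTEXT).
print(current.get(SENTINEL))  # sentinel

current.set("request-1")
snapshot = copy_context()  # later callbacks can run inside this snapshot
current.set("request-2")

# Unlike threading.local, each copied context keeps its own value, which is
# what lets the "current context" follow work across callback hops.
print(snapshot.run(lambda: current.get(SENTINEL)))  # request-1
print(current.get(SENTINEL))  # request-2
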
@@ -803,7 +796,6 @@ def run_in_background(
CRITICAL error about an unhandled error will be logged without much
indication about where it came from.
"""
current = current_context()
try:
res = f(*args, **kwargs)
except Exception:
@@ -832,23 +824,6 @@ def run_in_background(
# optimise out the messing about
return d

# The function may have reset the context before returning, so
# we need to restore it now.
ctx = set_current_context(current)

# The original context will be restored when the deferred
# completes, but there is nothing waiting for it, so it will
# get leaked into the reactor or some other function which
# wasn't expecting it. We therefore need to reset the context
# here.
#
# (If this feels asymmetric, consider it this way: we are
# effectively forking a new thread of execution. We are
# probably currently within a ``with LoggingContext()`` block,
# which is supposed to have a single entry and exit point. But
# by spawning off another deferred, we are effectively
# adding a new exit point.)
d.addBoth(_set_context_cb, ctx)
return d

@@ -868,65 +843,20 @@ def run_coroutine_in_background(
cannot change the log contexts.
"""

current = current_context()
d = defer.ensureDeferred(coroutine)

# The function may have reset the context before returning, so
# we need to restore it now.
ctx = set_current_context(current)

# The original context will be restored when the deferred
# completes, but there is nothing waiting for it, so it will
# get leaked into the reactor or some other function which
# wasn't expecting it. We therefore need to reset the context
# here.
#
# (If this feels asymmetric, consider it this way: we are
# effectively forking a new thread of execution. We are
# probably currently within a ``with LoggingContext()`` block,
# which is supposed to have a single entry and exit point. But
# by spawning off another deferred, we are effectively
# adding a new exit point.)
d.addBoth(_set_context_cb, ctx)
return d
return defer.ensureDeferred(coroutine)


T = TypeVar("T")


# TODO: This function is a no-op now and should be removed in a follow-up PR.
def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
"""Given a deferred, make it follow the Synapse logcontext rules:

If the deferred has completed, essentially does nothing (just returns another
completed deferred with the result/failure).

If the deferred has not yet completed, resets the logcontext before
returning a deferred. Then, when the deferred completes, restores the
current logcontext before running callbacks/errbacks.

(This is more-or-less the opposite operation to run_in_background.)
"""
if deferred.called and not deferred.paused:
# it looks like this deferred is ready to run any callbacks we give it
# immediately. We may as well optimise out the logcontext faffery.
return deferred

# ok, we can't be sure that a yield won't block, so let's reset the
# logcontext, and add a callback to the deferred to restore it.
prev_context = set_current_context(SENTINEL_CONTEXT)
deferred.addBoth(_set_context_cb, prev_context)
return deferred


ResultT = TypeVar("ResultT")


def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT:
"""A callback function which just sets the logging context"""
set_current_context(context)
return result


def defer_to_thread(
reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
@@ -938,9 +868,6 @@ def defer_to_thread(
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.

The result deferred follows the Synapse logcontext rules: you should `yield`
on it.

Args:
reactor: The reactor in whose main thread the Deferred will be invoked,
and whose threadpool we should use for the function.
@@ -978,9 +905,6 @@ def defer_to_threadpool(
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.

The result deferred follows the Synapse logcontext rules: you should `yield`
on it.

Args:
reactor: The reactor in whose main thread the Deferred will be invoked.
Normally this will be hs.get_reactor().
@@ -998,18 +922,6 @@ def defer_to_threadpool(
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
if not curr_context:
logger.warning(
"Calling defer_to_threadpool from sentinel context: metrics will be lost"
)
parent_context = None
else:
assert isinstance(curr_context, LoggingContext)
parent_context = curr_context

def g() -> R:
with LoggingContext(str(curr_context), parent_context=parent_context):
return f(*args, **kwargs)

return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
return make_deferred_yieldable(
threads.deferToThreadPool(reactor, threadpool, f, *args, **kwargs)
)

@@ -251,18 +251,17 @@ class _DummyTagNames:
try:
import opentracing
import opentracing.tags
from opentracing.scope_managers.contextvars import ContextVarsScopeManager

tags = opentracing.tags
except ImportError:
opentracing = None # type: ignore[assignment]
tags = _DummyTagNames # type: ignore[assignment]
ContextVarsScopeManager = None # type: ignore
try:
from jaeger_client import Config as JaegerConfig

from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
JaegerConfig = None # type: ignore
LogContextScopeManager = None # type: ignore


try:
@@ -484,7 +483,7 @@ def init_tracer(hs: "HomeServer") -> None:
config = JaegerConfig(
config=jaeger_config,
service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
scope_manager=LogContextScopeManager(),
scope_manager=ContextVarsScopeManager(),
metrics_factory=PrometheusMetricsFactory(),
)

@@ -1,161 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#

import logging
from typing import Optional

from opentracing import Scope, ScopeManager, Span

from synapse.logging.context import (
LoggingContext,
current_context,
nested_logging_context,
)

logger = logging.getLogger(__name__)


class LogContextScopeManager(ScopeManager):
"""
The LogContextScopeManager tracks the active scope in opentracing
by using the log contexts which are native to synapse. This is so
that the basic opentracing api can be used across twisted defereds.

It would be nice just to use opentracing's ContextVarsScopeManager,
but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
"""

def __init__(self) -> None:
pass

@property
def active(self) -> Optional[Scope]:
"""
Returns the currently active Scope which can be used to access the
currently active Scope.span.
If there is a non-null Scope, its wrapped Span
becomes an implicit parent of any newly-created Span at
Tracer.start_active_span() time.

Return:
The Scope that is active, or None if not available.
"""
ctx = current_context()
return ctx.scope

def activate(self, span: Span, finish_on_close: bool) -> Scope:
"""
Makes a Span active.
Args
span: the span that should become active.
finish_on_close: whether Span should be automatically finished when
Scope.close() is called.

Returns:
Scope to control the end of the active period for
*span*. It is a programming error to neglect to call
Scope.close() on the returned instance.
"""

ctx = current_context()

if not ctx:
logger.error("Tried to activate scope outside of loggingcontext")
return Scope(None, span) # type: ignore[arg-type]

if ctx.scope is not None:
# start a new logging context as a child of the existing one.
# Doing so -- rather than updating the existing logcontext -- means that
# creating several concurrent spans under the same logcontext works
# correctly.
ctx = nested_logging_context("")
enter_logcontext = True
else:
# if there is no span currently associated with the current logcontext, we
# just store the scope in it.
#
# This feels a bit dubious, but it does hack around a problem where a
# span outlasts its parent logcontext (which would otherwise lead to
# "Re-starting finished log context" errors).
enter_logcontext = False

scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
ctx.scope = scope
if enter_logcontext:
ctx.__enter__()

return scope


class _LogContextScope(Scope):
"""
A custom opentracing scope, associated with a LogContext

* When the scope is closed, the logcontext's active scope is reset to None.
and - if enter_logcontext was set - the logcontext is finished too.
"""

def __init__(
self,
manager: LogContextScopeManager,
span: Span,
logcontext: LoggingContext,
enter_logcontext: bool,
finish_on_close: bool,
):
"""
Args:
manager:
the manager that is responsible for this scope.
span:
the opentracing span which this scope represents the local
lifetime for.
logcontext:
the log context to which this scope is attached.
enter_logcontext:
if True the log context will be exited when the scope is finished
finish_on_close:
if True finish the span when the scope is closed
"""
super().__init__(manager, span)
self.logcontext = logcontext
self._finish_on_close = finish_on_close
self._enter_logcontext = enter_logcontext

def __str__(self) -> str:
return f"Scope<{self.span}>"

def close(self) -> None:
active_scope = self.manager.active
if active_scope is not self:
logger.error(
"Closing scope %s which is not the currently-active one %s",
self,
active_scope,
)

if self._finish_on_close:
self.span.finish()

self.logcontext.scope = None

if self._enter_logcontext:
self.logcontext.__exit__(None, None, None)
@@ -73,8 +73,6 @@ logger = logging.getLogger(__name__)

METRICS_PREFIX = "/_synapse/metrics"

all_gauges: Dict[str, Collector] = {}

HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")

SERVER_NAME_LABEL = "server_name"
@@ -163,42 +161,110 @@ class LaterGauge(Collector):
name: str
desc: str
labelnames: Optional[StrSequence] = attr.ib(hash=False)
# callback: should either return a value (if there are no labels for this metric),
# or dict mapping from a label tuple to a value
caller: Callable[
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
]
_instance_id_to_hook_map: Dict[
Optional[str], # instance_id
Callable[
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
],
] = attr.ib(factory=dict, hash=False)
"""
Map from homeserver instance_id to a callback. Each callback should either return a
value (if there are no labels for this metric), or dict mapping from a label tuple
to a value.

We use `instance_id` instead of `server_name` because it's possible to have multiple
workers running in the same process with the same `server_name`.
"""

def collect(self) -> Iterable[Metric]:
# The decision to add `SERVER_NAME_LABEL` is from the `LaterGauge` usage itself
# (we don't enforce it here, one level up).
g = GaugeMetricFamily(self.name, self.desc, labels=self.labelnames) # type: ignore[missing-server-name-label]

try:
calls = self.caller()
except Exception:
logger.exception("Exception running callback for LaterGauge(%s)", self.name)
yield g
return
for homeserver_instance_id, hook in self._instance_id_to_hook_map.items():
try:
hook_result = hook()
except Exception:
logger.exception(
"Exception running callback for LaterGauge(%s) for homeserver_instance_id=%s",
self.name,
homeserver_instance_id,
)
# Continue to return the rest of the metrics that aren't broken
continue

if isinstance(calls, (int, float)):
g.add_metric([], calls)
else:
for k, v in calls.items():
g.add_metric(k, v)
if isinstance(hook_result, (int, float)):
g.add_metric([], hook_result)
else:
for k, v in hook_result.items():
g.add_metric(k, v)

yield g

def register_hook(
self,
*,
homeserver_instance_id: Optional[str],
hook: Callable[
[], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
],
) -> None:
"""
Register a callback/hook that will be called to generate a metric samples for
the gauge.

Args:
homeserver_instance_id: The unique ID for this Synapse process instance
(`hs.get_instance_id()`) that this hook is associated with. This can be used
later to lookup all hooks associated with a given server name in order to
unregister them. This should only be omitted for global hooks that work
across all homeservers.
hook: A callback that should either return a value (if there are no
labels for this metric), or dict mapping from a label tuple to a value
"""
# We shouldn't have multiple hooks registered for the same homeserver `instance_id`.
existing_hook = self._instance_id_to_hook_map.get(homeserver_instance_id)
assert existing_hook is None, (
f"LaterGauge(name={self.name}) hook already registered for homeserver_instance_id={homeserver_instance_id}. "
"This is likely a Synapse bug and you forgot to unregister the previous hooks for "
"the server (especially in tests)."
)

self._instance_id_to_hook_map[homeserver_instance_id] = hook

def unregister_hooks_for_homeserver_instance_id(
self, homeserver_instance_id: str
) -> None:
"""
Unregister all hooks associated with the given homeserver `instance_id`. This should be
called when a homeserver is shutdown to avoid extra hooks sitting around.

Args:
homeserver_instance_id: The unique ID for this Synapse process instance to
unregister hooks for (`hs.get_instance_id()`).
"""
self._instance_id_to_hook_map.pop(homeserver_instance_id, None)

def __attrs_post_init__(self) -> None:
self._register()

def _register(self) -> None:
if self.name in all_gauges.keys():
logger.warning("%s already registered, reregistering", self.name)
REGISTRY.unregister(all_gauges.pop(self.name))

REGISTRY.register(self)
all_gauges[self.name] = self

# We shouldn't have multiple metrics with the same name. Typically, metrics
# should be created globally so you shouldn't be running into this and this will
# catch any stupid mistakes. The `REGISTRY.register(self)` call above will also
# raise an error if the metric already exists but to make things explicit, we'll
# also check here.
existing_gauge = all_later_gauges_to_clean_up_on_shutdown.get(self.name)
assert existing_gauge is None, f"LaterGauge(name={self.name}) already exists. "

# Keep track of the gauge so we can clean it up later.
all_later_gauges_to_clean_up_on_shutdown[self.name] = self


all_later_gauges_to_clean_up_on_shutdown: Dict[str, LaterGauge] = {}
"""
Track all `LaterGauge` instances so we can remove any associated hooks during homeserver
shutdown.
"""


# `MetricsEntry` only makes sense when it is a `Protocol`,
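Putting the LaterGauge rewrite together: gauges are created once at import time, hooks come and go with homeserver instances, and all_later_gauges_to_clean_up_on_shutdown lets teardown walk every gauge and unregister its hooks. A compressed model of that lifecycle (names follow the diff; the registry and homeserver plumbing here are stand-ins):

from typing import Callable, Dict, Optional


class GaugeSketch:
    """Stand-in capturing just the hook lifecycle from the diff."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.hooks: Dict[Optional[str], Callable[[], float]] = {}

    def register_hook(
        self, *, homeserver_instance_id: Optional[str], hook: Callable[[], float]
    ) -> None:
        assert homeserver_instance_id not in self.hooks, "hook already registered"
        self.hooks[homeserver_instance_id] = hook

    def unregister_hooks_for_homeserver_instance_id(self, instance_id: str) -> None:
        self.hooks.pop(instance_id, None)


# Module import time: gauge is created once and recorded for cleanup.
all_later_gauges: Dict[str, GaugeSketch] = {}
g = GaugeSketch("synapse_example_gauge")
all_later_gauges[g.name] = g

# Homeserver startup: each instance attaches its hook...
g.register_hook(homeserver_instance_id="hs-1", hook=lambda: 1.0)

# ...and shutdown walks the registry so no stale hooks survive (important in
# tests, where many homeservers share one process).
for gauge in all_later_gauges.values():
    gauge.unregister_hooks_for_homeserver_instance_id("hs-1")

assert g.hooks == {}
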
@@ -250,7 +316,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
|
||||
# Protects access to _registrations
|
||||
self._lock = threading.Lock()
|
||||
|
||||
self._register_with_collector()
|
||||
REGISTRY.register(self)
|
||||
|
||||
def register(
|
||||
self,
|
||||
@@ -341,14 +407,6 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
|
||||
gauge.add_metric(labels=key, value=getattr(metrics, name))
|
||||
yield gauge
|
||||
|
||||
def _register_with_collector(self) -> None:
|
||||
if self.name in all_gauges.keys():
|
||||
logger.warning("%s already registered, reregistering", self.name)
|
||||
REGISTRY.unregister(all_gauges.pop(self.name))
|
||||
|
||||
REGISTRY.register(self)
|
||||
all_gauges[self.name] = self
|
||||
|
||||
|
||||
class GaugeHistogramMetricFamilyWithLabels(GaugeHistogramMetricFamily):
|
||||
"""
|
||||
|
||||
@@ -223,10 +223,9 @@ def run_as_background_process(
|
||||
This should be used to wrap processes which are fired off to run in the
|
||||
background, instead of being associated with a particular request.
|
||||
|
||||
It returns a Deferred which completes when the function completes, but it doesn't
|
||||
follow the synapse logcontext rules, which makes it appropriate for passing to
|
||||
clock.looping_call and friends (or for firing-and-forgetting in the middle of a
|
||||
normal synapse async function).
|
||||
It returns a Deferred which completes when the function completes, which makes it
|
||||
appropriate for passing to clock.looping_call and friends (or for
|
||||
firing-and-forgetting in the middle of a normal synapse async function).
|
||||
|
||||
Args:
|
||||
desc: a description for this background process type
|
||||
@@ -241,8 +240,6 @@ def run_as_background_process(
|
||||
|
||||
Returns:
|
||||
Deferred which returns the result of func, or `None` if func raises.
|
||||
Note that the returned Deferred does not follow the synapse logcontext
|
||||
rules.
|
||||
"""
|
||||
|
||||
async def run() -> Optional[R]:
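
A sketch of the documented fire-and-forget contract (the argument order is illustrative; this branch threads a `server_name` through `_run_as_background_process`, so the exact signature may differ):

def start_prune_loop(hs: "HomeServer") -> None:
    async def prune_old_entries() -> None:
        # periodic work running under its own logcontext
        ...

    # The returned Deferred completes when `prune_old_entries` completes,
    # which is what makes it suitable for looping_call / fire-and-forget use.
    hs.get_clock().looping_call(
        lambda: run_as_background_process("prune_old_entries", prune_old_entries),
        60_000,  # every 60 seconds
    )
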
@@ -237,10 +237,9 @@ def run_as_background_process(
    This should be used to wrap processes which are fired off to run in the
    background, instead of being associated with a particular request.

    It returns a Deferred which completes when the function completes, but it doesn't
    follow the synapse logcontext rules, which makes it appropriate for passing to
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).
    It returns a Deferred which completes when the function completes, which makes it
    appropriate for passing to clock.looping_call and friends (or for
    firing-and-forgetting in the middle of a normal synapse async function).

    Args:
        desc: a description for this background process type
@@ -255,8 +254,6 @@ def run_as_background_process(

    Returns:
        Deferred which returns the result of func, or `None` if func raises.
        Note that the returned Deferred does not follow the synapse logcontext
        rules.
    """

    logger.warning(

@@ -1375,9 +1372,7 @@ class ModuleApi:

        Args:
            f: The function to call repeatedly. f can be either synchronous or
                asynchronous, and must follow Synapse's logcontext rules.
                More info about logcontexts is available at
                https://element-hq.github.io/synapse/latest/log_contexts.html
                asynchronous.
            msec: How long to wait between calls in milliseconds.
            *args: Positional arguments to pass to function.
            desc: The background task's description. Default to the function's name.
@@ -1431,9 +1426,7 @@ class ModuleApi:
        Args:
            msec: How long to wait before calling, in milliseconds.
            f: The function to call once. f can be either synchronous or
                asynchronous, and must follow Synapse's logcontext rules.
                More info about logcontexts is available at
                https://element-hq.github.io/synapse/latest/log_contexts.html
                asynchronous.
            *args: Positional arguments to pass to function.
            desc: The background task's description. Default to the function's name.
            **kwargs: Keyword arguments to pass to function.
@@ -1668,10 +1661,9 @@ class ModuleApi:
        This should be used to wrap processes which are fired off to run in the
        background, instead of being associated with a particular request.

        It returns a Deferred which completes when the function completes, but it doesn't
        follow the synapse logcontext rules, which makes it appropriate for passing to
        clock.looping_call and friends (or for firing-and-forgetting in the middle of a
        normal synapse async function).
        It returns a Deferred which completes when the function completes, which makes
        it appropriate for passing to clock.looping_call and friends (or for
        firing-and-forgetting in the middle of a normal synapse async function).

        Args:
            desc: a description for this background process type
@@ -1686,8 +1678,6 @@ class ModuleApi:

        Returns:
            Deferred which returns the result of func, or `None` if func raises.
            Note that the returned Deferred does not follow the synapse logcontext
            rules.
        """
        return _run_as_background_process(
            desc, self.server_name, func, *args, bg_start_span=bg_start_span, **kwargs

@@ -86,6 +86,24 @@ users_woken_by_stream_counter = Counter(
    labelnames=["stream", SERVER_NAME_LABEL],
)


notifier_listeners_gauge = LaterGauge(
    name="synapse_notifier_listeners",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

notifier_rooms_gauge = LaterGauge(
    name="synapse_notifier_rooms",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)
notifier_users_gauge = LaterGauge(
    name="synapse_notifier_users",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

T = TypeVar("T")


@@ -281,28 +299,20 @@ class Notifier:
            )
        }

        LaterGauge(
            name="synapse_notifier_listeners",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=count_listeners,
        notifier_listeners_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(), hook=count_listeners
        )

        LaterGauge(
            name="synapse_notifier_rooms",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {
        notifier_rooms_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (self.server_name,): count(
                    bool, list(self.room_to_user_streams.values())
                )
            },
        )
        LaterGauge(
            name="synapse_notifier_users",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self.user_to_user_stream)},
        notifier_users_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self.user_to_user_stream)},
        )

    def add_replication_callback(self, cb: Callable[[], None]) -> None:
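
The two return shapes `register_hook` accepts, per its docstring earlier in this diff (values invented): a bare number for an unlabelled gauge, or a mapping keyed by label tuples:

unlabelled_hook = lambda: 5  # single unlabelled sample

labelled_hook = lambda: {
    # one sample per label tuple; here the only label is SERVER_NAME_LABEL
    ("hs1.example.com",): 12,
    ("hs2.example.com",): 3,
}
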
@@ -49,7 +49,7 @@ from synapse.api.constants import (
from synapse.api.room_versions import PushRuleRoomFlag
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import SERVER_NAME_LABEL
from synapse.state import CREATE_KEY, POWER_KEY
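
`EventPersistencePair` replaces `Tuple[EventBase, EventContext]` throughout this diff, but its definition is never shown here. A plausible minimal form (an assumption, not the actual snapshot.py code) is a simple alias, which would explain why call sites keep appending plain `(event, ctx)` tuples unchanged:

# Hypothetical definition in synapse/events/snapshot.py:
EventPersistencePair = Tuple[EventBase, EventContext]
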
@@ -352,7 +352,7 @@ class BulkPushRuleEvaluator:
        return related_events

    async def action_for_events_by_user(
        self, events_and_context: List[Tuple[EventBase, EventContext]]
        self, events_and_context: List[EventPersistencePair]
    ) -> None:
        """Given a list of events and their associated contexts, evaluate the push rules
        for each event, check if the message should increment the unread count, and

@@ -24,8 +24,8 @@ from typing import TYPE_CHECKING, List, Tuple
from twisted.web.server import Request

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@@ -86,7 +86,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
    async def _serialize_payload(  # type: ignore[override]
        store: "DataStore",
        room_id: str,
        event_and_contexts: List[Tuple[EventBase, EventContext]],
        event_and_contexts: List[EventPersistencePair],
        backfilled: bool,
    ) -> JsonDict:
        """

@@ -25,8 +25,8 @@ from typing import TYPE_CHECKING, List, Tuple
from twisted.web.server import Request

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
@@ -85,7 +85,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
        events_and_context: List[Tuple[EventBase, EventContext]],
        events_and_context: List[EventPersistencePair],
        store: "DataStore",
        requester: Requester,
        ratelimit: bool,

@@ -106,6 +106,18 @@ user_ip_cache_counter = Counter(
    "synapse_replication_tcp_resource_user_ip_cache", "", labelnames=[SERVER_NAME_LABEL]
)

tcp_resource_total_connections_gauge = LaterGauge(
    name="synapse_replication_tcp_resource_total_connections",
    desc="",
    labelnames=[SERVER_NAME_LABEL],
)

tcp_command_queue_gauge = LaterGauge(
    name="synapse_replication_tcp_command_queue",
    desc="Number of inbound RDATA/POSITION commands queued for processing",
    labelnames=["stream_name", SERVER_NAME_LABEL],
)


# the type of the entries in _command_queues_by_stream
_StreamCommandQueue = Deque[
@@ -243,11 +255,9 @@ class ReplicationCommandHandler:
        # outgoing replication commands to.)
        self._connections: List[IReplicationConnection] = []

        LaterGauge(
            name="synapse_replication_tcp_resource_total_connections",
            desc="",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): len(self._connections)},
        tcp_resource_total_connections_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {(self.server_name,): len(self._connections)},
        )

        # When POSITION or RDATA commands arrive, we stick them in a queue and process
@@ -266,11 +276,9 @@ class ReplicationCommandHandler:
        # from that connection.
        self._streams_by_connection: Dict[IReplicationConnection, Set[str]] = {}

        LaterGauge(
            name="synapse_replication_tcp_command_queue",
            desc="Number of inbound RDATA/POSITION commands queued for processing",
            labelnames=["stream_name", SERVER_NAME_LABEL],
            caller=lambda: {
        tcp_command_queue_gauge.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (stream_name, self.server_name): len(queue)
                for stream_name, queue in self._command_queues_by_stream.items()
            },

@@ -527,7 +527,10 @@ pending_commands = LaterGauge(
    name="synapse_replication_tcp_protocol_pending_commands",
    desc="",
    labelnames=["name", SERVER_NAME_LABEL],
    caller=lambda: {
)
pending_commands.register_hook(
    homeserver_instance_id=None,
    hook=lambda: {
        (p.name, p.server_name): len(p.pending_commands) for p in connected_connections
    },
)
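
Note the `homeserver_instance_id=None` here, unlike the notifier hooks above: per the `register_hook` docstring this marks a global hook, spanning every homeserver instance in the process, so it is intentionally never removed by `unregister_hooks_for_homeserver_instance_id`. A sample of what the hook might yield when scraped (connection names invented):

# {("master", "hs1.example.com"): 0, ("worker1", "hs1.example.com"): 3}
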
@@ -544,7 +547,10 @@ transport_send_buffer = LaterGauge(
    name="synapse_replication_tcp_protocol_transport_send_buffer",
    desc="",
    labelnames=["name", SERVER_NAME_LABEL],
    caller=lambda: {
)
transport_send_buffer.register_hook(
    homeserver_instance_id=None,
    hook=lambda: {
        (p.name, p.server_name): transport_buffer_size(p) for p in connected_connections
    },
)
@@ -571,7 +577,10 @@ tcp_transport_kernel_send_buffer = LaterGauge(
    name="synapse_replication_tcp_protocol_transport_kernel_send_buffer",
    desc="",
    labelnames=["name", SERVER_NAME_LABEL],
    caller=lambda: {
)
tcp_transport_kernel_send_buffer.register_hook(
    homeserver_instance_id=None,
    hook=lambda: {
        (p.name, p.server_name): transport_kernel_read_buffer_size(p, False)
        for p in connected_connections
    },
@@ -582,7 +591,10 @@ tcp_transport_kernel_read_buffer = LaterGauge(
    name="synapse_replication_tcp_protocol_transport_kernel_read_buffer",
    desc="",
    labelnames=["name", SERVER_NAME_LABEL],
    caller=lambda: {
)
tcp_transport_kernel_read_buffer.register_hook(
    homeserver_instance_id=None,
    hook=lambda: {
        (p.name, p.server_name): transport_kernel_read_buffer_size(p, True)
        for p in connected_connections
    },

@@ -627,6 +627,15 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet):
            ]
            admin_users.sort(key=lambda user: user_power[user])

            if create_event.room_version.msc4289_creator_power_enabled:
                creators = create_event.content.get("additional_creators", []) + [
                    create_event.sender
                ]
                for creator in creators:
                    if self.is_mine_id(creator):
                        # include the creator as they won't be in the PL users map.
                        admin_users.append(creator)

            if not admin_users:
                raise SynapseError(
                    HTTPStatus.BAD_REQUEST, "No local admin user in room"
@@ -666,7 +675,11 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet):
            # updated power level event.
            new_pl_content = dict(pl_content)
            new_pl_content["users"] = dict(pl_content.get("users", {}))
            new_pl_content["users"][user_to_add] = new_pl_content["users"][admin_user_id]
            # give the new user the same PL as the admin, default to 100 in case there is no PL event.
            # This means in v12+ rooms we get PL100 if the creator promotes us.
            new_pl_content["users"][user_to_add] = new_pl_content["users"].get(
                admin_user_id, 100
            )

            fake_requester = create_requester(
                admin_user_id,

@@ -76,11 +76,17 @@ class AuthMetadataServlet(RestServlet):
    Advertises the OAuth 2.0 server metadata for the homeserver.
    """

    PATTERNS = client_patterns(
        "/org.matrix.msc2965/auth_metadata$",
        unstable=True,
        releases=(),
    )
    PATTERNS = [
        *client_patterns(
            "/auth_metadata$",
            releases=("v1",),
        ),
        *client_patterns(
            "/org.matrix.msc2965/auth_metadata$",
            unstable=True,
            releases=(),
        ),
    ]

    def __init__(self, hs: "HomeServer"):
        super().__init__()
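
With the widened PATTERNS above, the same servlet now serves both the stable and the unstable paths; roughly (paths follow the usual client_patterns prefixes; the response body is OAuth 2.0 server metadata and is illustrative only):

# GET /_matrix/client/v1/auth_metadata
# GET /_matrix/client/unstable/org.matrix.msc2965/auth_metadata
# Both return the homeserver's OAuth 2.0 server metadata, e.g.:
# {"issuer": "https://auth.example.com/", "authorization_endpoint": "...", ...}
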
@@ -24,6 +24,7 @@ from typing import TYPE_CHECKING, Tuple

from synapse.api.errors import Codes, ShadowBanError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.event_auth import check_valid_additional_creators
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.http.server import HttpServer
from synapse.http.servlet import (
@@ -85,13 +86,18 @@ class RoomUpgradeRestServlet(RestServlet):
                "Your homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )
        additional_creators = None
        if new_version.msc4289_creator_power_enabled:
            additional_creators = content.get("additional_creators")
            if additional_creators is not None:
                check_valid_additional_creators(additional_creators)

        try:
            async with self._worker_lock_handler.acquire_read_write_lock(
                NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
            ):
                new_room_id = await self._room_creation_handler.upgrade_room(
                    requester, room_id, new_version
                    requester, room_id, new_version, additional_creators
                )
        except ShadowBanError:
            # Generate a random room ID.
@@ -994,12 +994,18 @@ class SlidingSyncRestServlet(RestServlet):
            extensions=body.extensions,
        )

        sliding_sync_results = await self.sliding_sync_handler.wait_for_sync_for_user(
        (
            sliding_sync_results,
            did_wait,
        ) = await self.sliding_sync_handler.wait_for_sync_for_user(
            requester,
            sync_config,
            from_token,
            timeout,
        )
        # Knowing whether we waited is useful in traces to filter out long-running
        # requests where we were just waiting.
        set_tag("sliding_sync.did_wait", str(did_wait))

        # The client may have disconnected by now; don't bother to serialize the
        # response if so.
@@ -1011,6 +1017,7 @@ class SlidingSyncRestServlet(RestServlet):

        return 200, response_content

    @trace_with_opname("sliding_sync.encode_response")
    async def encode_response(
        self,
        requester: Requester,
@@ -1031,6 +1038,7 @@ class SlidingSyncRestServlet(RestServlet):

        return response

    @trace_with_opname("sliding_sync.encode_lists")
    def encode_lists(
        self, lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
    ) -> JsonDict:
@@ -1052,6 +1060,7 @@ class SlidingSyncRestServlet(RestServlet):

        return serialized_lists

    @trace_with_opname("sliding_sync.encode_rooms")
    async def encode_rooms(
        self,
        requester: Requester,
@@ -1172,6 +1181,7 @@ class SlidingSyncRestServlet(RestServlet):

        return serialized_rooms

    @trace_with_opname("sliding_sync.encode_extensions")
    async def encode_extensions(
        self, requester: Requester, extensions: SlidingSyncResult.Extensions
    ) -> JsonDict:

@@ -129,7 +129,10 @@ from synapse.http.client import (
)
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.media.media_repository import MediaRepository
from synapse.metrics import register_threadpool
from synapse.metrics import (
    all_later_gauges_to_clean_up_on_shutdown,
    register_threadpool,
)
from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
from synapse.module_api.callbacks import ModuleApiCallbacks
@@ -363,11 +366,36 @@ class HomeServer(metaclass=abc.ABCMeta):
        self.datastores = Databases(self.DATASTORE_CLASS, self)
        logger.info("Finished setting up.")

        # Register background tasks required by this server. This must be done
        # somewhat manually due to the background tasks not being registered
        # unless handlers are instantiated.
        if self.config.worker.run_background_tasks:
            self.setup_background_tasks()
    def __del__(self) -> None:
        """
        Called when the homeserver is garbage collected.

        Make sure we actually do some clean-up, rather than leak data.
        """
        self.cleanup()

    def cleanup(self) -> None:
        """
        WIP: Clean-up any references to the homeserver and stop any running related
        processes, timers, loops, replication stream, etc.

        This should be called wherever you care about the HomeServer being completely
        garbage collected like in tests. It's not necessary to call if you plan to just
        shut down the whole Python process anyway.

        Can be called multiple times.
        """
        logger.info("Received cleanup request for %s.", self.hostname)

        # TODO: Stop background processes, timers, loops, replication stream, etc.

        # Cleanup metrics associated with the homeserver
        for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
            later_gauge.unregister_hooks_for_homeserver_instance_id(
                self.get_instance_id()
            )

        logger.info("Cleanup complete for %s.", self.hostname)
    def start_listening(self) -> None:  # noqa: B027 (no-op by design)
        """Start the HTTP, manhole, metrics, etc listeners
@@ -376,7 +404,7 @@ class HomeServer(metaclass=abc.ABCMeta):
        appropriate listeners.
        """

    def setup_background_tasks(self) -> None:
    def start_background_tasks(self) -> None:
        """
        Some handlers have side effects on instantiation (like registering
        background updates). This function causes them to be fetched, and

@@ -54,6 +54,7 @@ from synapse.logging.opentracing import tag_args, trace
from synapse.metrics import SERVER_NAME_LABEL
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.event_federation import StateDifference
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import StateMap, StrCollection
from synapse.types.state import StateFilter
@@ -990,17 +991,35 @@ class StateResolutionStore:
        )

    def get_auth_chain_difference(
        self, room_id: str, state_sets: List[Set[str]]
    ) -> Awaitable[Set[str]]:
        """Given sets of state events figure out the auth chain difference (as
        self,
        room_id: str,
        state_sets: List[Set[str]],
        conflicted_state: Optional[Set[str]],
        additional_backwards_reachable_conflicted_events: Optional[Set[str]],
    ) -> Awaitable[StateDifference]:
        """Given sets of state events figure out the auth chain difference (as
        per state res v2 algorithm).

        This equivalent to fetching the full auth chain for each set of state
        This is equivalent to fetching the full auth chain for each set of state
        and returning the events that don't appear in each and every auth
        chain.

        If conflicted_state is not None, calculate and return the conflicted sub-graph as per
        state res v2.1. The event IDs in the conflicted state MUST be a subset of the event IDs in
        state_sets.

        If additional_backwards_reachable_conflicted_events is set, the provided events are included
        when calculating the conflicted subgraph. This is primarily useful for calculating the
        subgraph across a combination of persisted and unpersisted events.

        Returns:
            An awaitable that resolves to a set of event IDs.
            information on the auth chain difference, and also the conflicted subgraph if
            conflicted_state is not None
        """

        return self.main_store.get_auth_chain_difference(room_id, state_sets)
        return self.main_store.get_auth_chain_difference_extended(
            room_id,
            state_sets,
            conflicted_state,
            additional_backwards_reachable_conflicted_events,
        )
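
To make "auth chain difference" concrete, a standalone toy computation (event IDs invented):

# Auth chains reachable from two state sets:
chain_a = {"$create", "$alice_join", "$pl_1"}
chain_b = {"$create", "$alice_join", "$pl_2"}

# Events in some, but not all, of the auth chains:
auth_difference = (chain_a | chain_b) - (chain_a & chain_b)
assert auth_difference == {"$pl_1", "$pl_2"}
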
@@ -39,10 +39,11 @@ from typing import (
)

from synapse import event_auth
from synapse.api.constants import EventTypes
from synapse.api.constants import CREATOR_POWER_LEVEL, EventTypes
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.api.room_versions import RoomVersion, StateResolutionVersions
from synapse.events import EventBase, is_creator
from synapse.storage.databases.main.event_federation import StateDifference
from synapse.types import MutableStateMap, StateMap, StrCollection

logger = logging.getLogger(__name__)
@@ -63,8 +64,12 @@ class StateResolutionStore(Protocol):
    ) -> Awaitable[Dict[str, EventBase]]: ...

    def get_auth_chain_difference(
        self, room_id: str, state_sets: List[Set[str]]
    ) -> Awaitable[Set[str]]: ...
        self,
        room_id: str,
        state_sets: List[Set[str]],
        conflicted_state: Optional[Set[str]],
        additional_backwards_reachable_conflicted_events: Optional[set[str]],
    ) -> Awaitable[StateDifference]: ...


# We want to await to the reactor occasionally during state res when dealing
@@ -123,12 +128,17 @@ async def resolve_events_with_store(
    logger.debug("%d conflicted state entries", len(conflicted_state))
    logger.debug("Calculating auth chain difference")

    # Also fetch all auth events that appear in only some of the state sets'
    # auth chains.
    conflicted_set: Optional[Set[str]] = None
    if room_version.state_res == StateResolutionVersions.V2_1:
        # calculate the conflicted subgraph
        conflicted_set = set(itertools.chain.from_iterable(conflicted_state.values()))
    auth_diff = await _get_auth_chain_difference(
        room_id, state_sets, event_map, state_res_store
        room_id,
        state_sets,
        event_map,
        state_res_store,
        conflicted_set,
    )

    full_conflicted_set = set(
        itertools.chain(
            itertools.chain.from_iterable(conflicted_state.values()), auth_diff
@@ -168,15 +178,26 @@ async def resolve_events_with_store(

    logger.debug("sorted %d power events", len(sorted_power_events))

    # v2.1 starts iterative auth checks from the empty set and not the unconflicted state.
    # It relies on IAC behaviour which populates the base state with the events from auth_events
    # if the state tuple is missing from the base state. This ensures the base state is only
    # populated from auth_events rather than whatever the unconflicted state is (which could be
    # completely bogus).
    base_state = (
        {}
        if room_version.state_res == StateResolutionVersions.V2_1
        else unconflicted_state
    )

    # Now sequentially auth each one
    resolved_state = await _iterative_auth_checks(
        clock,
        room_id,
        room_version,
        sorted_power_events,
        unconflicted_state,
        event_map,
        state_res_store,
        event_ids=sorted_power_events,
        base_state=base_state,
        event_map=event_map,
        state_res_store=state_res_store,
    )

    logger.debug("resolved power events")
@@ -239,13 +260,23 @@ async def _get_power_level_for_sender(
    event = await _get_event(room_id, event_id, event_map, state_res_store)

    pl = None
    create = None
    for aid in event.auth_event_ids():
        aev = await _get_event(
            room_id, aid, event_map, state_res_store, allow_none=True
        )
        if aev and (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
            pl = aev
            break
        if aev and (aev.type, aev.state_key) == (EventTypes.Create, ""):
            create = aev

    if event.type != EventTypes.Create:
        # we should always have a create event
        assert create is not None

    if create and create.room_version.msc4289_creator_power_enabled:
        if is_creator(create, event.sender):
            return CREATOR_POWER_LEVEL

    if pl is None:
        # Couldn't find power level. Check if they're the creator of the room
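
For context, a plausible reading of the `is_creator` check used above (an assumption; the diff imports it from synapse.events without showing its body): under MSC4289 a user counts as a creator if they sent the m.room.create event or appear in its additional_creators list:

def is_creator_sketch(create_event: EventBase, user_id: str) -> bool:
    # Hypothetical mirror of synapse.events.is_creator, inferred from this diff.
    return user_id == create_event.sender or user_id in create_event.content.get(
        "additional_creators", []
    )
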
@@ -286,6 +317,7 @@ async def _get_auth_chain_difference(
    state_sets: Sequence[StateMap[str]],
    unpersisted_events: Dict[str, EventBase],
    state_res_store: StateResolutionStore,
    conflicted_state: Optional[Set[str]],
) -> Set[str]:
    """Compare the auth chains of each state set and return the set of events
    that only appear in some, but not all of the auth chains.
@@ -294,11 +326,18 @@ async def _get_auth_chain_difference(
        state_sets: The input state sets we are trying to resolve across.
        unpersisted_events: A map from event ID to EventBase containing all unpersisted
            events involved in this resolution.
        state_res_store:
        state_res_store: A way to retrieve events and extract graph information on the auth chains.
        conflicted_state: which event IDs are conflicted. Used in v2.1 for calculating the conflicted
            subgraph.

    Returns:
        The auth difference of the given state sets, as a set of event IDs.
        The auth difference of the given state sets, as a set of event IDs. Also includes the
        conflicted subgraph if `conflicted_state` is set.
    """
    is_state_res_v21 = conflicted_state is not None
    num_conflicted_state = (
        len(conflicted_state) if conflicted_state is not None else None
    )

    # The `StateResolutionStore.get_auth_chain_difference` function assumes that
    # all events passed to it (and their auth chains) have been persisted
@@ -318,14 +357,19 @@ async def _get_auth_chain_difference(
    # the event's auth chain with the events in `unpersisted_events` *plus* their
    # auth event IDs.
    events_to_auth_chain: Dict[str, Set[str]] = {}
    # remember the forward links when doing the graph traversal, we'll need it for v2.1 checks
    # This is a map from an event to the set of events that contain it as an auth event.
    event_to_next_event: Dict[str, Set[str]] = {}
    for event in unpersisted_events.values():
        chain = {event.event_id}
        events_to_auth_chain[event.event_id] = chain

        to_search = [event]
        while to_search:
            for auth_id in to_search.pop().auth_event_ids():
            next_event = to_search.pop()
            for auth_id in next_event.auth_event_ids():
                chain.add(auth_id)
                event_to_next_event.setdefault(auth_id, set()).add(next_event.event_id)
                auth_event = unpersisted_events.get(auth_id)
                if auth_event:
                    to_search.append(auth_event)
@@ -335,6 +379,8 @@ async def _get_auth_chain_difference(
    #
    # Note: If there are no `unpersisted_events` (which is the common case), we can do a
    # much simpler calculation.
    additional_backwards_reachable_conflicted_events: Set[str] = set()
    unpersisted_conflicted_events: Set[str] = set()
    if unpersisted_events:
        # The list of state sets to pass to the store, where each state set is a set
        # of the event ids making up the state. This is similar to `state_sets`,
@@ -372,7 +418,16 @@ async def _get_auth_chain_difference(
                )
            else:
                set_ids.add(event_id)

        if conflicted_state:
            for conflicted_event_id in conflicted_state:
                # presence in this map means it is unpersisted.
                event_chain = events_to_auth_chain.get(conflicted_event_id)
                if event_chain is not None:
                    unpersisted_conflicted_events.add(conflicted_event_id)
                    # tell the DB layer that we have some unpersisted conflicted events
                    additional_backwards_reachable_conflicted_events.update(
                        e for e in event_chain if e not in unpersisted_events
                    )
        # The auth chain difference of the unpersisted events of the state sets
        # is calculated by taking the difference between the union and
        # intersections.
@@ -384,12 +439,89 @@ async def _get_auth_chain_difference(
        auth_difference_unpersisted_part = ()
    state_sets_ids = [set(state_set.values()) for state_set in state_sets]

    difference = await state_res_store.get_auth_chain_difference(
        room_id, state_sets_ids
    )
    difference.update(auth_difference_unpersisted_part)
    if conflicted_state:
        # to ensure that conflicted state is a subset of state set IDs, we need to remove UNPERSISTED
        # conflicted state set ids as we removed them above.
        conflicted_state = conflicted_state - unpersisted_conflicted_events

    return difference
    difference = await state_res_store.get_auth_chain_difference(
        room_id,
        state_sets_ids,
        conflicted_state,
        additional_backwards_reachable_conflicted_events,
    )
    difference.auth_difference.update(auth_difference_unpersisted_part)

    # if we're doing v2.1 we may need to add or expand the conflicted subgraph
    if (
        is_state_res_v21
        and difference.conflicted_subgraph is not None
        and unpersisted_events
    ):
        # we always include the conflicted events themselves in the subgraph.
        if conflicted_state:
            difference.conflicted_subgraph.update(conflicted_state)
        # we may need to expand the subgraph in the case where the subgraph starts in the DB and
        # ends in unpersisted events. To do this, we first need to see where the subgraph got up to,
        # which we can do by finding the intersection between the additional backwards reachable
        # conflicted events and the conflicted subgraph. Events in both sets mean A) some unpersisted
        # conflicted event could backwards reach it and B) some persisted conflicted event could forward
        # reach it.
        subgraph_frontier = difference.conflicted_subgraph.intersection(
            additional_backwards_reachable_conflicted_events
        )
        # we can now combine the 2 scenarios:
        # - subgraph starts in DB and ends in unpersisted
        # - subgraph starts in unpersisted and ends in unpersisted
        # by expanding the frontier into unpersisted events.
        # The frontier is currently all persisted events. We want to expand this into unpersisted
        # events. Mark every forwards reachable event from the frontier in the forwards_conflicted_set
        # but NOT the backwards conflicted set. This mirrors what the DB layer does but in reverse:
        # we supplied events which are backwards reachable to the DB and now the DB is providing
        # forwards reachable events from the DB.
        forwards_conflicted_set: Set[str] = set()
        # we include unpersisted conflicted events here to process exclusive unpersisted subgraphs
        search_queue = subgraph_frontier.union(unpersisted_conflicted_events)
        while search_queue:
            frontier_event = search_queue.pop()
            next_event_ids = event_to_next_event.get(frontier_event, set())
            search_queue.update(next_event_ids)
            forwards_conflicted_set.add(frontier_event)

        # we've already calculated the backwards form as this is the auth chain for each
        # unpersisted conflicted event.
        backwards_conflicted_set: Set[str] = set()
        for uce in unpersisted_conflicted_events:
            backwards_conflicted_set.update(events_to_auth_chain.get(uce, []))

        # the unpersisted conflicted subgraph is the intersection of the backwards/forwards sets
        conflicted_subgraph_unpersisted_part = backwards_conflicted_set.intersection(
            forwards_conflicted_set
        )
        # print(f"event_to_next_event={event_to_next_event}")
        # print(f"unpersisted_conflicted_events={unpersisted_conflicted_events}")
        # print(f"unpersisted backwards_conflicted_set={backwards_conflicted_set}")
        # print(f"unpersisted forwards_conflicted_set={forwards_conflicted_set}")
        difference.conflicted_subgraph.update(conflicted_subgraph_unpersisted_part)

    if difference.conflicted_subgraph:
        old_events = difference.auth_difference.union(
            conflicted_state if conflicted_state else set()
        )
        additional_events = difference.conflicted_subgraph.difference(old_events)

        logger.debug(
            "v2.1 %s additional events replayed=%d num_conflicts=%d conflicted_subgraph=%d auth_difference=%d",
            room_id,
            len(additional_events),
            num_conflicted_state,
            len(difference.conflicted_subgraph),
            len(difference.auth_difference),
        )
        # State res v2.1 includes the conflicted subgraph in the difference
        return difference.auth_difference.union(difference.conflicted_subgraph)

    return difference.auth_difference
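
A standalone toy run of the backwards/forwards-intersection idea used above, on the chain A <- B <- C <- D <- E with {B, D} conflicted (pure Python, invented IDs):

# Each event points at its single auth event in this toy DAG.
auth = {"B": "A", "C": "B", "D": "C", "E": "D"}

def backwards(e: str) -> set:
    # everything reachable from e by following auth events
    out = set()
    while e in auth:
        e = auth[e]
        out.add(e)
    return out

def forwards(e: str) -> set:
    # everything that can reach e via its auth chain
    return {x for x in auth if e in backwards(x)}

conflicted = {"B", "D"}
all_backwards = set().union(*(backwards(c) for c in conflicted))  # {A, B, C}
all_forwards = set().union(*(forwards(c) for c in conflicted))    # {C, D, E}
assert all_backwards & all_forwards == {"C"}  # the subgraph between B and D
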
def _seperate(

@@ -51,7 +51,7 @@ from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.snapshot import EventContext, EventPersistencePair
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.logging.opentracing import (
@@ -144,7 +144,7 @@ class _PersistEventsTask:

    name: ClassVar[str] = "persist_event_batch"  # used for opentracing

    events_and_contexts: List[Tuple[EventBase, EventContext]]
    events_and_contexts: List[EventPersistencePair]
    backfilled: bool

    def try_merge(self, task: "_EventPersistQueueTask") -> bool:
@@ -391,7 +391,7 @@ class EventsPersistenceStorageController:
    @trace
    async def persist_events(
        self,
        events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
        events_and_contexts: Iterable[EventPersistencePair],
        backfilled: bool = False,
    ) -> Tuple[List[EventBase], RoomStreamToken]:
        """
@@ -414,7 +414,7 @@ class EventsPersistenceStorageController:
            a room that has been un-partial stated.
        """
        event_ids: List[str] = []
        partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
        partitioned: Dict[str, List[EventPersistencePair]] = {}
        for event, ctx in events_and_contexts:
            partitioned.setdefault(event.room_id, []).append((event, ctx))
            event_ids.append(event.event_id)
@@ -430,7 +430,7 @@ class EventsPersistenceStorageController:
        set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))

        async def enqueue(
            item: Tuple[str, List[Tuple[EventBase, EventContext]]],
            item: Tuple[str, List[EventPersistencePair]],
        ) -> Dict[str, str]:
            room_id, evs_ctxs = item
            return await self._event_persist_queue.add_to_queue(
@@ -677,7 +677,7 @@ class EventsPersistenceStorageController:
        return replaced_events

    async def _calculate_new_forward_extremities_and_state_delta(
        self, room_id: str, ev_ctx_rm: List[Tuple[EventBase, EventContext]]
        self, room_id: str, ev_ctx_rm: List[EventPersistencePair]
    ) -> Tuple[Optional[Set[str]], Optional[DeltaState]]:
        """Calculates the new forward extremities and state delta for a room
        given events to persist.
@@ -802,7 +802,7 @@ class EventsPersistenceStorageController:
    async def _calculate_new_extremities(
        self,
        room_id: str,
        event_contexts: List[Tuple[EventBase, EventContext]],
        event_contexts: List[EventPersistencePair],
        latest_event_ids: AbstractSet[str],
    ) -> Set[str]:
        """Calculates the new forward extremities for a room given events to
@@ -862,7 +862,7 @@ class EventsPersistenceStorageController:
    async def _get_new_state_after_events(
        self,
        room_id: str,
        events_context: List[Tuple[EventBase, EventContext]],
        events_context: List[EventPersistencePair],
        old_latest_event_ids: AbstractSet[str],
        new_latest_event_ids: Set[str],
    ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]:
@@ -1039,7 +1039,7 @@ class EventsPersistenceStorageController:
        new_latest_event_ids: Set[str],
        resolved_state_group: int,
        event_id_to_state_group: Dict[str, int],
        events_context: List[Tuple[EventBase, EventContext]],
        events_context: List[EventPersistencePair],
    ) -> Set[str]:
        """See if we can prune any of the extremities after calculating the
        resolved state.
@@ -1176,7 +1176,7 @@ class EventsPersistenceStorageController:
    async def _is_server_still_joined(
        self,
        room_id: str,
        ev_ctx_rm: List[Tuple[EventBase, EventContext]],
        ev_ctx_rm: List[EventPersistencePair],
        delta: DeltaState,
    ) -> bool:
        """Check if the server will still be joined after the given events have

@@ -61,7 +61,7 @@ from synapse.logging.context import (
    current_context,
    make_deferred_yieldable,
)
from synapse.metrics import SERVER_NAME_LABEL, LaterGauge, register_threadpool
from synapse.metrics import SERVER_NAME_LABEL, register_threadpool
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.background_updates import BackgroundUpdater
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
@@ -611,12 +611,6 @@ class DatabasePool:
        )

        self.updates = BackgroundUpdater(hs, self)
        LaterGauge(
            name="synapse_background_update_status",
            desc="Background update status",
            labelnames=[SERVER_NAME_LABEL],
            caller=lambda: {(self.server_name,): self.updates.get_status()},
        )

        self._previous_txn_total_time = 0.0
        self._current_txn_total_time = 0.0

@@ -22,6 +22,7 @@
import logging
from typing import TYPE_CHECKING, Generic, List, Optional, Type, TypeVar

from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import DatabasePool, make_conn
from synapse.storage.databases.main.events import PersistEventsStore
@@ -40,6 +41,13 @@ logger = logging.getLogger(__name__)
DataStoreT = TypeVar("DataStoreT", bound=SQLBaseStore, covariant=True)


background_update_status = LaterGauge(
    name="synapse_background_update_status",
    desc="Background update status",
    labelnames=["database_name", SERVER_NAME_LABEL],
)


class Databases(Generic[DataStoreT]):
    """The various databases.

@@ -143,6 +151,15 @@ class Databases(Generic[DataStoreT]):

        db_conn.close()

        # Track the background update status for each database
        background_update_status.register_hook(
            homeserver_instance_id=hs.get_instance_id(),
            hook=lambda: {
                (database.name(), server_name): database.updates.get_status()
                for database in self.databases
            },
        )

        # Sanity check that we have actually configured all the required stores.
        if not main:
            raise Exception("No 'main' database configured")
@@ -114,6 +114,12 @@ _LONGEST_BACKOFF_PERIOD_MILLISECONDS = (
assert 0 < _LONGEST_BACKOFF_PERIOD_MILLISECONDS <= ((2**31) - 1)


# We use 2^53-1 as a "very large number", it has no particular
# importance other than knowing synapse can support it (given canonical json
# requires it).
MAX_CHAIN_LENGTH = (2**53) - 1


# All the info we need while iterating the DAG while backfilling
@attr.s(frozen=True, slots=True, auto_attribs=True)
class BackfillQueueNavigationItem:
@@ -123,6 +129,14 @@ class BackfillQueueNavigationItem:
    type: str


@attr.s(frozen=True, slots=True, auto_attribs=True)
class StateDifference:
    # The event IDs in the auth difference.
    auth_difference: Set[str]
    # The event IDs in the conflicted state subgraph. Used in v2.1 only.
    conflicted_subgraph: Optional[Set[str]]


class _NoChainCoverIndex(Exception):
    def __init__(self, room_id: str):
        super().__init__("Unexpectedly no chain cover for events in %s" % (room_id,))
@@ -471,17 +485,41 @@ class EventFederationWorkerStore(
        return results

    async def get_auth_chain_difference(
        self, room_id: str, state_sets: List[Set[str]]
        self,
        room_id: str,
        state_sets: List[Set[str]],
    ) -> Set[str]:
        """Given sets of state events figure out the auth chain difference (as
        state_diff = await self.get_auth_chain_difference_extended(
            room_id, state_sets, None, None
        )
        return state_diff.auth_difference

    async def get_auth_chain_difference_extended(
        self,
        room_id: str,
        state_sets: List[Set[str]],
        conflicted_set: Optional[Set[str]],
        additional_backwards_reachable_conflicted_events: Optional[Set[str]],
    ) -> StateDifference:
        """Given sets of state events figure out the auth chain difference (as
        per state res v2 algorithm).

        This equivalent to fetching the full auth chain for each set of state
        This is equivalent to fetching the full auth chain for each set of state
        and returning the events that don't appear in each and every auth
        chain.

        If conflicted_set is not None, calculate and return the conflicted sub-graph as per
        state res v2.1. The event IDs in the conflicted set MUST be a subset of the event IDs in
        state_sets.

        If additional_backwards_reachable_conflicted_events is set, the provided events are included
        when calculating the conflicted subgraph. This is primarily useful for calculating the
        subgraph across a combination of persisted and unpersisted events. The event IDs in this set
        MUST be a subset of the event IDs in state_sets.

        Returns:
            The set of the difference in auth chains.
            information on the auth chain difference, and also the conflicted subgraph if
            conflicted_set is not None
        """

        # Check if we have indexed the room so we can use the chain cover
@@ -495,6 +533,8 @@ class EventFederationWorkerStore(
                self._get_auth_chain_difference_using_cover_index_txn,
                room_id,
                state_sets,
                conflicted_set,
                additional_backwards_reachable_conflicted_events,
            )
        except _NoChainCoverIndex:
            # For whatever reason we don't actually have a chain cover index
@@ -503,25 +543,48 @@ class EventFederationWorkerStore(
        if not self.tests_allow_no_chain_cover_index:
            raise

        return await self.db_pool.runInteraction(
        # It's been 4 years since we added chain cover, so we expect all rooms to have it.
        # If they don't, we will error out when trying to do state res v2.1
        if conflicted_set is not None:
            raise _NoChainCoverIndex(room_id)

        auth_diff = await self.db_pool.runInteraction(
            "get_auth_chain_difference",
            self._get_auth_chain_difference_txn,
            state_sets,
        )
        return StateDifference(auth_difference=auth_diff, conflicted_subgraph=None)

    def _get_auth_chain_difference_using_cover_index_txn(
        self, txn: LoggingTransaction, room_id: str, state_sets: List[Set[str]]
    ) -> Set[str]:
        self,
        txn: LoggingTransaction,
        room_id: str,
        state_sets: List[Set[str]],
        conflicted_set: Optional[Set[str]] = None,
        additional_backwards_reachable_conflicted_events: Optional[Set[str]] = None,
    ) -> StateDifference:
        """Calculates the auth chain difference using the chain index.

        See docs/auth_chain_difference_algorithm.md for details
        """
        is_state_res_v21 = conflicted_set is not None

        # First we look up the chain ID/sequence numbers for all the events, and
        # work out the chain/sequence numbers reachable from each state set.

        initial_events = set(state_sets[0]).union(*state_sets[1:])

        if is_state_res_v21:
            # Sanity check v2.1 fields
            assert conflicted_set is not None
            assert conflicted_set.issubset(initial_events)
            # It's possible for the conflicted_set to be empty if all the conflicts are in
            # unpersisted events, so we don't assert that conflicted_set has len > 0
            if additional_backwards_reachable_conflicted_events:
                assert additional_backwards_reachable_conflicted_events.issubset(
                    initial_events
                )

        # Map from event_id -> (chain ID, seq no)
        chain_info: Dict[str, Tuple[int, int]] = {}
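
For orientation, the chain-cover encoding the code below manipulates (values invented): every event sits at a (chain_id, sequence_number) position, and within one chain, position n being reachable implies positions 1..n are too:

chain_info = {
    "$create": (1, 1),
    "$pl_1": (2, 1),
    "$pl_2": (2, 2),  # same chain as $pl_1, one step later
}
# So per-chain reachability collapses to a single number: a max sequence
# number for the backwards direction and a min for the forwards direction,
# exactly the two dicts (conflicted_backwards_reachable and
# conflicted_forwards_reachable) populated below.
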
|
||||
|
||||
@@ -557,14 +620,14 @@ class EventFederationWorkerStore(
|
||||
events_missing_chain_info = initial_events.difference(chain_info)
|
||||
|
||||
# The result set to return, i.e. the auth chain difference.
|
||||
result: Set[str] = set()
|
||||
auth_difference_result: Set[str] = set()
|
||||
|
||||
if events_missing_chain_info:
|
||||
# For some reason we have events we haven't calculated the chain
|
||||
# index for, so we need to handle those separately. This should only
|
||||
# happen for older rooms where the server doesn't have all the auth
|
||||
# events.
|
||||
result = self._fixup_auth_chain_difference_sets(
|
||||
auth_difference_result = self._fixup_auth_chain_difference_sets(
|
||||
txn,
|
||||
room_id,
|
||||
state_sets=state_sets,
|
||||
@@ -583,6 +646,45 @@ class EventFederationWorkerStore(
|
||||
|
||||
fetch_chain_info(new_events_to_fetch)
|
||||
|
||||
# State Res v2.1 needs extra data structures to calculate the conflicted subgraph which
|
||||
# are outlined below.
|
||||
|
||||
# A subset of chain_info for conflicted events only, as we need to
|
||||
# loop all conflicted chain positions. Map from event_id -> (chain ID, seq no)
|
||||
conflicted_chain_positions: Dict[str, Tuple[int, int]] = {}
|
||||
# For each chain, remember the positions where conflicted events are.
|
||||
# We need this for calculating the forward reachable events.
|
||||
conflicted_chain_to_seq: Dict[int, Set[int]] = {} # chain_id => {seq_num}
|
||||
# A subset of chain_info for additional backwards reachable events only, as we need to
|
||||
# loop all additional backwards reachable events for calculating backwards reachable events.
|
||||
additional_backwards_reachable_positions: Dict[
|
||||
str, Tuple[int, int]
|
||||
] = {} # event_id => (chain_id, seq_num)
|
||||
# These next two fields are critical as the intersection of them is the conflicted subgraph.
|
||||
# We'll populate them when we walk the chain links.
|
||||
# chain_id => max(seq_num) backwards reachable (e.g 4 means 1,2,3,4 are backwards reachable)
|
||||
conflicted_backwards_reachable: Dict[int, int] = {}
|
||||
# chain_id => min(seq_num) forwards reachable (e.g 4 means 4,5,6..n are forwards reachable)
|
||||
conflicted_forwards_reachable: Dict[int, int] = {}
|
||||
|
||||
# populate the v2.1 data structures
|
||||
if is_state_res_v21:
|
||||
assert conflicted_set is not None
|
||||
# provide chain positions for each conflicted event
|
||||
for conflicted_event_id in conflicted_set:
|
||||
(chain_id, seq_num) = chain_info[conflicted_event_id]
|
||||
conflicted_chain_positions[conflicted_event_id] = (chain_id, seq_num)
|
||||
conflicted_chain_to_seq.setdefault(chain_id, set()).add(seq_num)
|
||||
if additional_backwards_reachable_conflicted_events:
|
||||
for (
|
||||
additional_event_id
|
||||
) in additional_backwards_reachable_conflicted_events:
|
||||
(chain_id, seq_num) = chain_info[additional_event_id]
|
||||
additional_backwards_reachable_positions[additional_event_id] = (
|
||||
chain_id,
|
||||
seq_num,
|
||||
)
|
||||
|
||||
# Corresponds to `state_sets`, except as a map from chain ID to max
|
||||
# sequence number reachable from the state set.
|
||||
set_to_chain: List[Dict[int, int]] = []
|
||||
@@ -600,6 +702,8 @@ class EventFederationWorkerStore(
|
||||
|
||||
# (We need to take a copy of `seen_chains` as the function mutates it)
|
||||
for links in self._get_chain_links(txn, set(seen_chains)):
|
||||
# `links` encodes the backwards reachable events _from a single chain_ all the way to
|
||||
# the root of the graph.
|
||||
for chains in set_to_chain:
|
||||
for chain_id in links:
|
||||
if chain_id not in chains:
|
||||
@@ -608,6 +712,87 @@ class EventFederationWorkerStore(
|
||||
_materialize(chain_id, chains[chain_id], links, chains)
|
||||
|
||||
seen_chains.update(chains)
|
||||
if is_state_res_v21:
|
||||
# Apply v2.1 conflicted event reachability checks.
|
||||
#
|
||||
# A <-- B <-- C <-- D <-- E
|
||||
#
|
||||
# Backwards reachable from C = {A,B}
|
||||
# Forwards reachable from C = {D,E}
|
||||
|
||||
# this handles calculating forwards reachable information and updates
|
||||
# conflicted_forwards_reachable.
|
||||
accumulate_forwards_reachable_events(
|
||||
conflicted_forwards_reachable,
|
||||
links,
|
||||
conflicted_chain_positions,
|
||||
)
|
||||
|
||||
# handle backwards reachable information
|
||||
for (
|
||||
conflicted_chain_id,
|
||||
conflicted_chain_seq,
|
||||
) in conflicted_chain_positions.values():
|
||||
if conflicted_chain_id not in links:
|
||||
# This conflicted event does not lie on the path to the root.
|
||||
continue
|
||||
|
||||
# The conflicted chain position itself encodes reachability information
|
||||
# _within_ the chain. Set it now before walking to other links.
|
||||
conflicted_backwards_reachable[conflicted_chain_id] = max(
|
||||
conflicted_chain_seq,
|
||||
conflicted_backwards_reachable.get(conflicted_chain_id, 0),
|
||||
)
|
||||
|
||||
# Build backwards reachability paths. This is the same as what the auth difference
|
||||
# code does. We find which chain the conflicted event
|
||||
# belongs to then walk it backwards to the root. We store reachability info
|
||||
# for all conflicted events in the same map 'conflicted_backwards_reachable'
|
||||
# as we don't care about the paths themselves.
|
||||
_materialize(
|
||||
conflicted_chain_id,
|
||||
conflicted_chain_seq,
|
||||
links,
|
||||
conflicted_backwards_reachable,
|
||||
)

            # Mark some extra events as backwards reachable. This is used when we
            # have some unpersisted events and want to know the subgraph across
            # the persisted/unpersisted boundary:
            #                 |
            # A <-- B <-- C <-|- D <-- E <-- F
            #       persisted | unpersisted
            #
            # Assume {B,E} are conflicted; we want to return {B,C,D,E}.
            #
            # The unpersisted code ensures it passes C as an additional backwards
            # reachable event. C is NOT a conflicted event, but we do need to
            # consider it as part of the backwards reachable set. When we then
            # calculate the forwards reachable set from B, C will be in both the
            # backwards and forwards reachable sets and hence will be included in
            # the conflicted subgraph.
            for (
                additional_chain_id,
                additional_chain_seq,
            ) in additional_backwards_reachable_positions.values():
                if additional_chain_id not in links:
                    # The additional backwards reachable event does not lie on the
                    # path to the root.
                    continue

                # The additional event chain position itself encodes reachability
                # information. It means that position and all positions earlier in
                # that chain are backwards reachable by some unpersisted
                # conflicted event.
                conflicted_backwards_reachable[additional_chain_id] = max(
                    additional_chain_seq,
                    conflicted_backwards_reachable.get(additional_chain_id, 0),
                )

                # Now walk the chains back, marking backwards reachable events.
                # This is the same thing we do for auth difference / conflicted events.
                _materialize(
                    additional_chain_id,  # walk all links back, marking them as backwards reachable
                    additional_chain_seq,
                    links,
                    conflicted_backwards_reachable,
                )

        # Now for each chain we figure out the maximum sequence number reachable
        # from *any* state set and the minimum sequence number reachable from
@@ -616,7 +801,7 @@ class EventFederationWorkerStore(

        # Mapping from chain ID to the range of sequence numbers that should be
        # pulled from the database.
-       chain_to_gap: Dict[int, Tuple[int, int]] = {}
+       auth_diff_chain_to_gap: Dict[int, Tuple[int, int]] = {}

        for chain_id in seen_chains:
            min_seq_no = min(chains.get(chain_id, 0) for chains in set_to_chain)
@@ -629,15 +814,76 @@ class EventFederationWorkerStore(
            for seq_no in range(min_seq_no + 1, max_seq_no + 1):
                event_id = chain_to_event.get(chain_id, {}).get(seq_no)
                if event_id:
-                   result.add(event_id)
+                   auth_difference_result.add(event_id)
                else:
-                   chain_to_gap[chain_id] = (min_seq_no, max_seq_no)
+                   auth_diff_chain_to_gap[chain_id] = (min_seq_no, max_seq_no)
                    break

-       if not chain_to_gap:
-           # If there are no gaps to fetch, we're done!
-           return result
+       conflicted_subgraph_result: Set[str] = set()
+       # Mapping from chain ID to the range of sequence numbers that should be
+       # pulled from the database.
+       conflicted_subgraph_chain_to_gap: Dict[int, Tuple[int, int]] = {}
+       if is_state_res_v21:
+           # Also include the conflicted subgraph, using backward/forward
+           # reachability info from all the conflicted events. To calculate
+           # this, we want to extract the intersection between the backwards
+           # and forwards reachability sets, e.g.:
+           #   A <- B <- C <- D <- E
+           # Assume B and D are conflicted, so we want {C} as the conflicted subgraph.
+           #   B_backwards={A}, B_forwards={C,D,E}
+           #   D_backwards={A,B,C}, D_forwards={E}
+           #   ALL_backwards={A,B,C}, ALL_forwards={C,D,E}
+           #   Intersection(ALL_backwards, ALL_forwards) = {C}
+           #
+           # It's worth noting that once we have the ALL_ sets, we no longer
+           # care about the paths. We're dealing with chains and not singular
+           # events, but we've already got the ALL_ sets. As such, we can
+           # inspect each chain in isolation and check for overlapping sequence
+           # numbers:
+           #            1,2,3,4,5   Seq Num
+           #   Chain N [A,B,C,D,E]
+           #
+           # If (N,4) is in the backwards set and (N,2) is in the forwards set,
+           # then the intersection is the events between sequence numbers 2 and
+           # 4. We will include the conflicted events themselves in the
+           # subgraph (they already are), hence the full set of events is
+           # {B,C,D}.
+           for chain_id, backwards_seq_num in conflicted_backwards_reachable.items():
+               forwards_seq_num = conflicted_forwards_reachable.get(chain_id)
+               if forwards_seq_num is None:
+                   continue  # this chain isn't in both sets so can't intersect
+               if forwards_seq_num > backwards_seq_num:
+                   continue  # this chain is in both sets but they don't overlap
+               for seq_no in range(
+                   forwards_seq_num, backwards_seq_num + 1
+               ):  # inclusive of both
+                   event_id = chain_to_event.get(chain_id, {}).get(seq_no)
+                   if event_id:
+                       conflicted_subgraph_result.add(event_id)
+                   else:
+                       conflicted_subgraph_chain_to_gap[chain_id] = (
+                           # _fetch_event_ids_from_chains_txn is exclusive of the min value
+                           forwards_seq_num - 1,
+                           backwards_seq_num,
+                       )
+                       break
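# A runnable toy (assumed shapes, not the real storage layer) showing the
# intersection step above on a single chain of five events A..E, where B (seq 2)
# and D (seq 4) are conflicted.
toy_chain_to_event = {1: {1: "A", 2: "B", 3: "C", 4: "D", 5: "E"}}
toy_backwards = {1: 4}  # max backwards-reachable seq per chain
toy_forwards = {1: 2}  # min forwards-reachable seq per chain

toy_subgraph = set()
for toy_chain_id, toy_back_seq in toy_backwards.items():
    toy_fwd_seq = toy_forwards.get(toy_chain_id)
    if toy_fwd_seq is None or toy_fwd_seq > toy_back_seq:
        continue  # no overlap on this chain
    for toy_seq in range(toy_fwd_seq, toy_back_seq + 1):  # inclusive of both ends
        toy_subgraph.add(toy_chain_to_event[toy_chain_id][toy_seq])

assert toy_subgraph == {"B", "C", "D"}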

+       if auth_diff_chain_to_gap:
+           auth_difference_result.update(
+               self._fetch_event_ids_from_chains_txn(txn, auth_diff_chain_to_gap)
+           )
+       if conflicted_subgraph_chain_to_gap:
+           conflicted_subgraph_result.update(
+               self._fetch_event_ids_from_chains_txn(
+                   txn, conflicted_subgraph_chain_to_gap
+               )
+           )
+
+       return StateDifference(
+           auth_difference=auth_difference_result,
+           conflicted_subgraph=conflicted_subgraph_result,
+       )
+
+   def _fetch_event_ids_from_chains_txn(
+       self, txn: LoggingTransaction, chains: Dict[int, Tuple[int, int]]
+   ) -> Set[str]:
+       result: Set[str] = set()
+       if isinstance(self.database_engine, PostgresEngine):
+           # We can use `execute_values` to efficiently fetch the gaps when
+           # using postgres.
@@ -651,7 +897,7 @@ class EventFederationWorkerStore(

            args = [
                (chain_id, min_no, max_no)
-               for chain_id, (min_no, max_no) in chain_to_gap.items()
+               for chain_id, (min_no, max_no) in chains.items()
            ]

            rows = txn.execute_values(sql, args)
@@ -662,10 +908,9 @@ class EventFederationWorkerStore(
                SELECT event_id FROM event_auth_chains
                WHERE chain_id = ? AND ? < sequence_number AND sequence_number <= ?
            """
-           for chain_id, (min_no, max_no) in chain_to_gap.items():
+           for chain_id, (min_no, max_no) in chains.items():
                txn.execute(sql, (chain_id, min_no, max_no))
                result.update(r for (r,) in txn)

        return result
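# A small sketch (in-memory stand-in, assumed data shapes) of the gap semantics
# used by _fetch_event_ids_from_chains_txn: each (min, max) pair is exclusive of
# min and inclusive of max, mirroring the SQL
# `? < sequence_number AND sequence_number <= ?` above.
from typing import Dict, Set, Tuple

def fetch_event_ids_from_chains(
    chain_rows: Dict[int, Dict[int, str]],  # chain_id -> seq -> event_id
    chains: Dict[int, Tuple[int, int]],  # chain_id -> (min_exclusive, max_inclusive)
) -> Set[str]:
    result: Set[str] = set()
    for chain_id, (min_no, max_no) in chains.items():
        for seq, event_id in chain_rows.get(chain_id, {}).items():
            if min_no < seq <= max_no:
                result.add(event_id)
    return result

rows = {7: {1: "$a", 2: "$b", 3: "$c", 4: "$d"}}
assert fetch_event_ids_from_chains(rows, {7: (1, 3)}) == {"$b", "$c"}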

    def _fixup_auth_chain_difference_sets(
@@ -2165,6 +2410,7 @@ def _materialize(
    origin_sequence_number: int,
    links: Dict[int, List[Tuple[int, int, int]]],
    materialized: Dict[int, int],
+   backwards: bool = True,
) -> None:
    """Helper function for fetching auth chain links. For a given origin chain
    ID / sequence number and a dictionary of links, updates the materialized
@@ -2181,6 +2427,7 @@ def _materialize(
            target sequence number.
        materialized: dict to update with new reachability information, as a
            map from chain ID to max sequence number reachable.
+       backwards: If True, walks backwards down the chains. If False, walks
+           forwards along the chains.
    """

    # Do a standard graph traversal.
@@ -2195,12 +2442,104 @@ def _materialize(
            target_chain_id,
            target_sequence_number,
        ) in chain_links:
-           # Ignore any links that are higher up the chain.
-           if sequence_number > s:
-               continue
-
-           # Check if we have already visited the target chain before; if so we
-           # can skip it.
-           if materialized.get(target_chain_id, 0) < target_sequence_number:
-               stack.append((target_chain_id, target_sequence_number))
-               materialized[target_chain_id] = target_sequence_number
+           if backwards:
+               # Ignore any links that are higher up the chain.
+               if sequence_number > s:
+                   continue
+
+               # Check if we have already visited the target chain before; if
+               # so we can skip it.
+               if materialized.get(target_chain_id, 0) < target_sequence_number:
+                   stack.append((target_chain_id, target_sequence_number))
+                   materialized[target_chain_id] = target_sequence_number
+           else:
+               # Ignore any links that are lower down the chain.
+               if sequence_number < s:
+                   continue
+               # Check if we have already visited the target chain before; if
+               # so we can skip it.
+               if (
+                   materialized.get(target_chain_id, MAX_CHAIN_LENGTH)
+                   > target_sequence_number
+               ):
+                   stack.append((target_chain_id, target_sequence_number))
+                   materialized[target_chain_id] = target_sequence_number
+
+
+def _generate_forward_links(
+    links: Dict[int, List[Tuple[int, int, int]]],
+) -> Dict[int, List[Tuple[int, int, int]]]:
+    """Invert the given backwards links so that they can be walked forwards."""
+    new_links: Dict[int, List[Tuple[int, int, int]]] = {}
+    for origin_chain_id, chain_links in links.items():
+        for origin_seq_num, target_chain_id, target_seq_num in chain_links:
+            new_links.setdefault(target_chain_id, []).append(
+                (target_seq_num, origin_chain_id, origin_seq_num)
+            )
+    return new_links
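# A quick illustration of the inversion, runnable right after the definition
# above: a backwards link "chain 1 at seq 3 points back to chain 2 at seq 4"
# becomes a forwards link "chain 2 at seq 4 points forwards to chain 1 at seq 3".
example_back_links = {1: [(3, 2, 4)]}
assert _generate_forward_links(example_back_links) == {2: [(4, 1, 3)]}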
+
+
+def accumulate_forwards_reachable_events(
+    conflicted_forwards_reachable: Dict[int, int],
+    back_links: Dict[int, List[Tuple[int, int, int]]],
+    conflicted_chain_positions: Dict[str, Tuple[int, int]],
+) -> None:
+    """Accumulate new forwards reachable events using the back_links provided.
+
+    Accumulating forwards reachable information is quite different from
+    accumulating backwards reachable information, because _get_chain_links
+    returns the entire linkage information for backwards reachable events, but
+    not _forwards_ reachable events. We are only interested in the forwards
+    reachable information that is encoded in the backwards reachable links, so
+    we can just invert all the operations we do for backwards reachable events
+    to calculate a subset of forwards reachable information. The caveat with
+    this approach is that it is a _subset_. This means new back_links may
+    encode new forwards reachable information which we also need. Consider
+    this scenario:
+
+        A <-- B <-- C <--- D <-- E <-- F    Chain 1
+              |
+              `----- G <-- H <-- I          Chain 2
+                           |
+                           `---- J <-- K    Chain 3
+
+    Now consider what happens when B is a conflicted event. _get_chain_links
+    returns the conflicted chain and ALL links heading towards the root of the
+    graph. This means we will know the Chain 1 to Chain 2 link via C (as all
+    links for the chain are returned, not strictly ones with a lower sequence
+    number), but we will NOT know the Chain 2 to Chain 3 link via H. We can be
+    blissfully unaware of Chain 3 entirely, provided there isn't some other
+    conflicted event on that chain. Consider what happens when K is /also/
+    conflicted: _get_chain_links will generate two iterations, one for B and
+    one for K. It's important that we re-evaluate the forwards reachable
+    information for B to include Chain 3 when we process the K iteration,
+    hence we are "accumulating" forwards reachability information.
+
+    NB: We don't consider 'additional backwards reachable events' here because
+    they have no effect on forwards reachability calculations, only backwards.
+
+    Args:
+        conflicted_forwards_reachable: The materialised dict of forwards
+            reachable information. The output of this function is stored here.
+        back_links: One iteration of _get_chain_links, which encodes backwards
+            reachable information.
+        conflicted_chain_positions: The conflicted events.
+    """
+    # The links go backwards, but we want them to go forwards as well for v2.1.
+    fwd_links = _generate_forward_links(back_links)
+
+    # For each conflicted event, accumulate forwards reachability information.
+    for (
+        conflicted_chain_id,
+        conflicted_chain_seq,
+    ) in conflicted_chain_positions.values():
+        # The conflicted event itself encodes reachability information,
+        # e.g. if D was conflicted, it encodes E,F as forwards reachable.
+        conflicted_forwards_reachable[conflicted_chain_id] = min(
+            conflicted_chain_seq,
+            conflicted_forwards_reachable.get(conflicted_chain_id, MAX_CHAIN_LENGTH),
+        )
+        # Walk from the conflicted event forwards to explore the links.
+        # This function checks if we've visited the chain before and skips
+        # reprocessing, so this does not repeatedly traverse the graph.
+        _materialize(
+            conflicted_chain_id,
+            conflicted_chain_seq,
+            fwd_links,
+            conflicted_forwards_reachable,
+            backwards=False,
+        )
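# A self-contained toy (stand-in shapes; MAX is a placeholder for Synapse's
# MAX_CHAIN_LENGTH) of the behaviour described in the docstring above. Chain 2
# hangs off chain 1 via C (seq 3), chain 3 hangs off chain 2 via H (seq 2); the
# conflicted events are B at (1, 2) and K at (3, 2).
from typing import Dict, List, Tuple

MAX = 10**8

def accumulate(
    forwards: Dict[int, int],
    back_links: Dict[int, List[Tuple[int, int, int]]],
    conflicted: Dict[str, Tuple[int, int]],
) -> None:
    # Invert the backwards links, then do a min-tracking forwards walk.
    fwd: Dict[int, List[Tuple[int, int, int]]] = {}
    for origin, chain_links in back_links.items():
        for o_seq, target, t_seq in chain_links:
            fwd.setdefault(target, []).append((t_seq, origin, o_seq))
    for chain_id, seq in conflicted.values():
        forwards[chain_id] = min(seq, forwards.get(chain_id, MAX))
        stack = [(chain_id, seq)]
        while stack:
            c, s = stack.pop()
            for link_seq, target, t_seq in fwd.get(c, []):
                if link_seq < s:
                    continue  # the link is lower down the chain
                if forwards.get(target, MAX) > t_seq:
                    forwards[target] = t_seq
                    stack.append((target, t_seq))

# The iteration for B alone only knows the chain 2 -> chain 1 link via C...
first: Dict[int, int] = {}
accumulate(first, {2: [(1, 1, 3)]}, {"B": (1, 2)})
assert first == {1: 2, 2: 1}  # chain 3 is invisible so far
# ...but the iteration for K contributes the chain 3 -> chain 2 link via H, so
# re-evaluating B over the combined links now reaches chain 3 as well.
combined: Dict[int, int] = {}
accumulate(combined, {2: [(1, 1, 3)], 3: [(1, 2, 2)]}, {"B": (1, 2), "K": (3, 2)})
assert combined == {1: 2, 2: 1, 3: 1}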

@@ -51,8 +51,13 @@ from synapse.api.constants import (
)
from synapse.api.errors import PartialStateConflictError
from synapse.api.room_versions import RoomVersions
-from synapse.events import EventBase, StrippedStateEvent, relation_from_event
-from synapse.events.snapshot import EventContext
+from synapse.events import (
+    EventBase,
+    StrippedStateEvent,
+    is_creator,
+    relation_from_event,
+)
+from synapse.events.snapshot import EventPersistencePair
from synapse.events.utils import parse_stripped_state_event
from synapse.logging.opentracing import trace
from synapse.metrics import SERVER_NAME_LABEL
@@ -269,7 +274,7 @@ class PersistEventsStore:
    async def _persist_events_and_state_updates(
        self,
        room_id: str,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
        *,
        state_delta_for_room: Optional[DeltaState],
        new_forward_extremities: Optional[Set[str]],
@@ -481,17 +486,27 @@ class PersistEventsStore:
        pl_id = state[(EventTypes.PowerLevels, "")]
        pl_event = await self.store.get_event(pl_id, allow_none=True)

-       if pl_event is None:
-           # Per the spec, if a power level event isn't in the room, grant the
-           # creator level 100 and all other users 0.
-           create_id = state[(EventTypes.Create, "")]
-           create_event = await self.store.get_event(create_id, allow_none=True)
-           if create_event is None:
-               # Not sure how this would happen, but if it does then just deny
-               # the redaction.
-               logger.warning("No create event found for room %s", event.room_id)
-               return False
-           if create_event.sender == event.sender:
+       create_id = state[(EventTypes.Create, "")]
+       create_event = await self.store.get_event(create_id, allow_none=True)
+
+       if create_event is None:
+           # Not sure how this would happen, but if it does then just deny the
+           # redaction.
+           logger.warning("No create event found for room %s", event.room_id)
+           return False
+
+       if create_event.room_version.msc4289_creator_power_enabled:
+           # Per the spec, grant the creator infinite power level and all
+           # other users 0.
+           if is_creator(create_event, event.sender):
+               return True
+           if pl_event is None:
+               # Per the spec, users other than the room creator have power
+               # level 0, which is less than the default level to redact
+               # events (50).
+               return False
+       else:
+           # Per the spec, if a power level event isn't in the room, grant the
+           # creator level 100 (the default redaction level is 50) and all
+           # other users 0.
+           if pl_event is None:
+               return create_event.sender == event.sender
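# A distilled, hedged sketch (plain booleans and a hypothetical helper name) of
# the branching above: with MSC4289-style creator power the creator may always
# redact and everyone else defaults to power level 0, while in older room
# versions a missing power-levels event grants the creator level 100 (clearing
# the default redaction level of 50) and all other users 0.
from typing import Optional

def may_redact_without_pl_event(
    creator_power_enabled: bool,
    sender_is_creator: bool,
    has_pl_event: bool,
) -> Optional[bool]:
    """Returns True/False when decidable without the PL event, else None."""
    if creator_power_enabled:
        if sender_is_creator:
            return True  # the creator has effectively infinite power
        if not has_pl_event:
            return False  # everyone else defaults to 0, which is below 50
    elif not has_pl_event:
        return sender_is_creator  # the creator gets 100, all others get 0
    return None  # fall through to inspecting the power-levels event itself

assert may_redact_without_pl_event(True, True, False) is True
assert may_redact_without_pl_event(False, False, False) is False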

+       assert pl_event is not None
        sender_level = pl_event.content.get("users", {}).get(event.sender)
@@ -517,7 +532,7 @@ class PersistEventsStore:
    async def _calculate_sliding_sync_table_changes(
        self,
        room_id: str,
-       events_and_contexts: Sequence[Tuple[EventBase, EventContext]],
+       events_and_contexts: Sequence[EventPersistencePair],
        delta_state: DeltaState,
    ) -> SlidingSyncTableChanges:
        """
@@ -1001,7 +1016,7 @@ class PersistEventsStore:
        txn: LoggingTransaction,
        *,
        room_id: str,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
        inhibit_local_membership_updates: bool,
        state_delta_for_room: Optional[DeltaState],
        new_forward_extremities: Optional[Set[str]],
@@ -1651,7 +1666,7 @@ class PersistEventsStore:
    def _persist_transaction_ids_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
    ) -> None:
        """Persist the mapping from transaction IDs to event IDs (if defined)."""

@@ -2301,7 +2316,7 @@ class PersistEventsStore:
        self,
        txn: LoggingTransaction,
        room_id: str,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
    ) -> None:
        """
        Update the latest `event_stream_ordering`/`bump_stamp` columns in the
@@ -2441,8 +2456,8 @@ class PersistEventsStore:

    @classmethod
    def _filter_events_and_contexts_for_duplicates(
-       cls, events_and_contexts: List[Tuple[EventBase, EventContext]]
-   ) -> List[Tuple[EventBase, EventContext]]:
+       cls, events_and_contexts: List[EventPersistencePair]
+   ) -> List[EventPersistencePair]:
        """Ensure that we don't have the same event twice.

        Pick the earliest non-outlier if there is one, else the earliest one.
@@ -2453,9 +2468,7 @@ class PersistEventsStore:
        Returns:
            filtered list
        """
-       new_events_and_contexts: OrderedDict[str, Tuple[EventBase, EventContext]] = (
-           OrderedDict()
-       )
+       new_events_and_contexts: OrderedDict[str, EventPersistencePair] = OrderedDict()
        for event, context in events_and_contexts:
            prev_event_context = new_events_and_contexts.get(event.event_id)
            if prev_event_context:
@@ -2473,7 +2486,7 @@ class PersistEventsStore:
        self,
        txn: LoggingTransaction,
        room_id: str,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
    ) -> None:
        """Update min_depth for each room

@@ -2515,8 +2528,8 @@ class PersistEventsStore:
    def _update_outliers_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
-   ) -> List[Tuple[EventBase, EventContext]]:
+       events_and_contexts: List[EventPersistencePair],
+   ) -> List[EventPersistencePair]:
        """Update any outliers with new event info.

        This turns outliers into ex-outliers (unless the new event was rejected), and
@@ -2623,7 +2636,7 @@ class PersistEventsStore:
    def _store_event_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: Collection[Tuple[EventBase, EventContext]],
+       events_and_contexts: Collection[EventPersistencePair],
    ) -> None:
        """Insert new events into the event, event_json, redaction and
        state_events tables.
@@ -2727,8 +2740,8 @@ class PersistEventsStore:
    def _store_rejected_events_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
-   ) -> List[Tuple[EventBase, EventContext]]:
+       events_and_contexts: List[EventPersistencePair],
+   ) -> List[EventPersistencePair]:
        """Add rows to the 'rejections' table for received events which were
        rejected.

@@ -2755,8 +2768,8 @@ class PersistEventsStore:
        self,
        txn: LoggingTransaction,
        *,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
-       all_events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
+       all_events_and_contexts: List[EventPersistencePair],
        inhibit_local_membership_updates: bool = False,
    ) -> None:
        """Update all the miscellaneous tables for new events
@@ -2850,7 +2863,7 @@ class PersistEventsStore:
    def _add_to_cache(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
    ) -> None:
        to_prefill: List[EventCacheEntry] = []

@@ -3323,8 +3336,8 @@ class PersistEventsStore:
    def _set_push_actions_for_event_and_users_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: List[Tuple[EventBase, EventContext]],
-       all_events_and_contexts: List[Tuple[EventBase, EventContext]],
+       events_and_contexts: List[EventPersistencePair],
+       all_events_and_contexts: List[EventPersistencePair],
    ) -> None:
        """Handles moving push actions from the staging table to the main
        event_push_actions table for all events in `events_and_contexts`.
@@ -3407,7 +3420,7 @@ class PersistEventsStore:
    def _store_event_state_mappings_txn(
        self,
        txn: LoggingTransaction,
-       events_and_contexts: Collection[Tuple[EventBase, EventContext]],
+       events_and_contexts: Collection[EventPersistencePair],
    ) -> None:
        """
        Raises:

@@ -81,6 +81,7 @@ from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
+   make_tuple_in_list_sql_clause,
)
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import (
@@ -1337,6 +1338,7 @@ class EventsWorkerStore(SQLBaseStore):
        fetched_event_ids: Set[str] = set()
        fetched_events: Dict[str, _EventRow] = {}

+       @trace
        async def _fetch_event_ids_and_get_outstanding_redactions(
            event_ids_to_fetch: Collection[str],
        ) -> Collection[str]:
@@ -1344,6 +1346,10 @@ class EventsWorkerStore(SQLBaseStore):
            Fetch all of the given event_ids and return any associated redaction event_ids
            that we still need to fetch in the next iteration.
            """
+           set_tag(
+               SynapseTags.FUNC_ARG_PREFIX + "event_ids_to_fetch.length",
+               str(len(event_ids_to_fetch)),
+           )
            row_map = await self._enqueue_events(event_ids_to_fetch)

            # We need to recursively fetch any redactions of those events.
@@ -1617,21 +1623,28 @@ class EventsWorkerStore(SQLBaseStore):
        # likely that some of these events may be for the same room/user combo, in
        # which case we don't need to do redundant queries
        to_check_set = set(to_check)
-       for room_and_user in to_check_set:
-           room_redactions_sql = "SELECT redacting_event_id, redact_end_ordering FROM room_ban_redactions WHERE room_id = ? and user_id = ?"
-           txn.execute(room_redactions_sql, room_and_user)
-
-           res = txn.fetchone()
-           # we have a redaction for a room, user_id combo - apply it to matching events
-           if not res:
-               continue
+       room_redaction_sql = "SELECT room_id, user_id, redacting_event_id, redact_end_ordering FROM room_ban_redactions WHERE "
+       (
+           in_list_clause,
+           room_redaction_args,
+       ) = make_tuple_in_list_sql_clause(
+           self.database_engine, ("room_id", "user_id"), to_check_set
+       )
+       txn.execute(room_redaction_sql + in_list_clause, room_redaction_args)
+       for (
+           returned_room_id,
+           returned_user_id,
+           redacting_event_id,
+           redact_end_ordering,
+       ) in txn:
            for e_row in events:
                e_json = json.loads(e_row.json)
                room_id = e_json.get("room_id")
                user_id = e_json.get("sender")
+               room_and_user = (returned_room_id, returned_user_id)
+               # Check if we have a redaction match for this room, user combination.
+               if room_and_user != (room_id, user_id):
+                   continue
-               redacting_event_id, redact_end_ordering = res
                if redact_end_ordering:
                    # Avoid redacting any events arriving *after* the membership event which
                    # ends an active redaction - note that this will always redact
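# A hedged sketch of the batching idea above: instead of one query per
# (room_id, user_id) pair, build a single tuple-IN clause. The exact SQL that
# make_tuple_in_list_sql_clause emits is engine-specific; this stand-in shows
# one plausible expansion, not Synapse's real helper.
from typing import Iterable, List, Tuple

def tuple_in_list_sql_clause(
    columns: Tuple[str, ...], values: Iterable[Tuple[str, ...]]
) -> Tuple[str, List[str]]:
    rows = list(values)
    placeholders = ", ".join(
        "(" + ", ".join("?" * len(columns)) + ")" for _ in rows
    )
    clause = "(%s) IN (%s)" % (", ".join(columns), placeholders)
    args = [v for row in rows for v in row]
    return clause, args

clause, args = tuple_in_list_sql_clause(
    ("room_id", "user_id"), [("!a:x", "@u:x"), ("!b:y", "@v:y")]
)
assert clause == "(room_id, user_id) IN ((?, ?), (?, ?))"
assert args == ["!a:x", "@u:x", "!b:y", "@v:y"]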

@@ -33,6 +33,73 @@ from synapse.types import RoomStreamToken
logger = logging.getLogger(__name__)


+purge_room_tables_with_event_id_index = (
+    "event_auth",
+    "event_edges",
+    "event_json",
+    "event_push_actions_staging",
+    "event_relations",
+    "event_to_state_groups",
+    "event_auth_chains",
+    "event_auth_chain_to_calculate",
+    "redactions",
+    "rejections",
+    "state_events",
+)
+"""
+Tables which lack an index on `room_id` but have one on `event_id`.
+"""
+
+purge_room_tables_with_room_id_column = (
+    "current_state_events",
+    "destination_rooms",
+    "event_backward_extremities",
+    "event_forward_extremities",
+    "event_push_actions",
+    "event_search",
+    "event_failed_pull_attempts",
+    # Note: the partial state tables have foreign keys between each other, and to
+    # `events` and `rooms`. We need to delete from them in the right order.
+    "partial_state_events",
+    "partial_state_rooms_servers",
+    "partial_state_rooms",
+    # Note: the _membership(s) tables have foreign keys to the `events` table
+    # so must be deleted first.
+    "local_current_membership",
+    "room_memberships",
+    # Note: the sliding_sync_ tables have foreign keys to the `events` table
+    # so must be deleted first.
+    "sliding_sync_joined_rooms",
+    "sliding_sync_membership_snapshots",
+    "events",
+    "federation_inbound_events_staging",
+    "receipts_graph",
+    "receipts_linearized",
+    "room_aliases",
+    "room_depth",
+    "room_stats_state",
+    "room_stats_current",
+    "room_stats_earliest_token",
+    "stream_ordering_to_exterm",
+    "users_in_public_rooms",
+    "users_who_share_private_rooms",
+    # No useful index, but let's clear them anyway.
+    "appservice_room_list",
+    "e2e_room_keys",
+    "event_push_summary",
+    "pusher_throttle",
+    "room_account_data",
+    "room_tags",
+    # "rooms" happens last, to keep the foreign keys in the other tables happy.
+    "rooms",
+)
+"""
+The tables with a `room_id` column, regardless of whether they have a useful
+index on `room_id`.
+"""


class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
    async def purge_history(
        self, room_id: str, token: str, delete_local_events: bool
@@ -398,20 +465,8 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
            referenced_chain_id_tuples,
        )

-       # Now we delete tables which lack an index on room_id but have one on event_id
-       for table in (
-           "event_auth",
-           "event_edges",
-           "event_json",
-           "event_push_actions_staging",
-           "event_relations",
-           "event_to_state_groups",
-           "event_auth_chains",
-           "event_auth_chain_to_calculate",
-           "redactions",
-           "rejections",
-           "state_events",
-       ):
+       # Now we delete tables which lack an index on `room_id` but have one on `event_id`
+       for table in purge_room_tables_with_event_id_index:
            logger.info("[purge] removing from %s", table)

            txn.execute(
@@ -424,51 +479,9 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
                (room_id,),
            )

-       # next, the tables with an index on room_id (or no useful index)
-       for table in (
-           "current_state_events",
-           "destination_rooms",
-           "event_backward_extremities",
-           "event_forward_extremities",
-           "event_push_actions",
-           "event_search",
-           "event_failed_pull_attempts",
-           # Note: the partial state tables have foreign keys between each other, and to
-           # `events` and `rooms`. We need to delete from them in the right order.
-           "partial_state_events",
-           "partial_state_rooms_servers",
-           "partial_state_rooms",
-           # Note: the _membership(s) tables have foreign keys to the `events` table
-           # so must be deleted first.
-           "local_current_membership",
-           "room_memberships",
-           # Note: the sliding_sync_ tables have foreign keys to the `events` table
-           # so must be deleted first.
-           "sliding_sync_joined_rooms",
-           "sliding_sync_membership_snapshots",
-           "events",
-           "federation_inbound_events_staging",
-           "receipts_graph",
-           "receipts_linearized",
-           "room_aliases",
-           "room_depth",
-           "room_stats_state",
-           "room_stats_current",
-           "room_stats_earliest_token",
-           "stream_ordering_to_exterm",
-           "users_in_public_rooms",
-           "users_who_share_private_rooms",
-           # no useful index, but let's clear them anyway
-           "appservice_room_list",
-           "e2e_room_keys",
-           "event_push_summary",
-           "pusher_throttle",
-           "room_account_data",
-           "room_tags",
-           # "rooms" happens last, to keep the foreign keys in the other tables
-           # happy
-           "rooms",
-       ):
+       # Next, the tables with a `room_id` column, regardless of whether they
+       # have a useful index on `room_id`.
+       for table in purge_room_tables_with_room_id_column:
            logger.info("[purge] removing from %s", table)
            txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))


@@ -84,6 +84,13 @@ _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership"
_POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE = 1000


+federation_known_servers_gauge = LaterGauge(
+    name="synapse_federation_known_servers",
+    desc="",
+    labelnames=[SERVER_NAME_LABEL],
+)


@attr.s(frozen=True, slots=True, auto_attribs=True)
class EventIdMembership:
    """Returned by `get_membership_from_event_ids`"""
@@ -116,11 +123,9 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
            1,
            self._count_known_servers,
        )
-       LaterGauge(
-           name="synapse_federation_known_servers",
-           desc="",
-           labelnames=[SERVER_NAME_LABEL],
-           caller=lambda: {(self.server_name,): self._known_servers_count},
+       federation_known_servers_gauge.register_hook(
+           homeserver_instance_id=hs.get_instance_id(),
+           hook=lambda: {(self.server_name,): self._known_servers_count},
        )
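# The refactor above moves gauge construction to module scope and registers a
# per-homeserver callback at instantiation time. A schematic of the pattern
# (toy stand-in class, not the real synapse.metrics.LaterGauge):
from typing import Callable, Dict, Optional, Tuple

class ToyLaterGauge:
    def __init__(self, name: str) -> None:
        self.name = name
        self.hooks: Dict[Optional[str], Callable[[], Dict[Tuple[str, ...], float]]] = {}

    def register_hook(
        self,
        homeserver_instance_id: Optional[str],
        hook: Callable[[], Dict[Tuple[str, ...], float]],
    ) -> None:
        # Keyed by instance ID, so each homeserver in a test process can
        # contribute its own samples to the shared, module-level gauge.
        self.hooks[homeserver_instance_id] = hook

    def collect(self) -> Dict[Tuple[str, ...], float]:
        samples: Dict[Tuple[str, ...], float] = {}
        for hook in self.hooks.values():
            samples.update(hook())
        return samples

toy_gauge = ToyLaterGauge("synapse_federation_known_servers")
toy_gauge.register_hook(
    homeserver_instance_id="hs1", hook=lambda: {("example.org",): 42.0}
)
assert toy_gauge.collect() == {("example.org",): 42.0}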

    @wrap_as_background_process("_count_known_servers")
@@ -196,6 +201,19 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
            retcol="state_key",
        )

+   async def get_invited_users_in_room(self, room_id: str) -> StrCollection:
+       """Returns a list of users invited to the room."""
+       return await self.db_pool.simple_select_onecol(
+           table="current_state_events",
+           keyvalues={
+               "type": EventTypes.Member,
+               "room_id": room_id,
+               "membership": Membership.INVITE,
+           },
+           retcol="state_key",
+           desc="get_invited_users_in_room",
+       )

    @cached()
    def get_user_in_room_with_profile(self, room_id: str, user_id: str) -> ProfileInfo:
        raise NotImplementedError()

@@ -25,8 +25,7 @@ from typing import (
    Tuple,
)

-from synapse.events import EventBase
-from synapse.events.snapshot import EventContext
+from synapse.events.snapshot import EventPersistencePair
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
@@ -228,7 +227,7 @@ class StateDeletionDataStore:

    @contextlib.asynccontextmanager
    async def persisting_state_group_references(
-       self, event_and_contexts: Collection[Tuple[EventBase, EventContext]]
+       self, event_and_contexts: Collection[EventPersistencePair]
    ) -> AsyncIterator[None]:
        """Wraps the persistence of the given events and contexts, ensuring that
        any state groups referenced still exist and that they don't get deleted.

@@ -355,12 +355,78 @@ class RoomAlias(DomainSpecificString):


@attr.s(slots=True, frozen=True, repr=False)
-class RoomID(DomainSpecificString):
-   """Structure representing a room id."""
+class RoomIdWithDomain(DomainSpecificString):
+   """Structure representing a room ID with a domain suffix."""

    SIGIL = "!"


+# The set of urlsafe base64 characters, no padding.
+ROOM_ID_PATTERN_DOMAINLESS = re.compile(r"^[A-Za-z0-9\-_]{43}$")
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True, repr=False)
+class RoomID:
+    """Structure representing a room ID, which may or may not have a domain.
+
+    There are two forms of room IDs:
+    - "!localpart:domain", used in most room versions prior to MSC4291.
+    - "!event_id_base_64", used in room versions post MSC4291.
+    This class will accept any room ID which meets either of these two criteria.
+    """
+
+    SIGIL = "!"
+    id: str
+    room_id_with_domain: Optional[RoomIdWithDomain]
+
+    @classmethod
+    def is_valid(cls: Type["RoomID"], s: str) -> bool:
+        if ":" in s:
+            return RoomIdWithDomain.is_valid(s)
+        try:
+            cls.from_string(s)
+            return True
+        except Exception:
+            return False
+
+    def get_domain(self) -> Optional[str]:
+        if not self.room_id_with_domain:
+            return None
+        return self.room_id_with_domain.domain
+
+    def to_string(self) -> str:
+        if self.room_id_with_domain:
+            return self.room_id_with_domain.to_string()
+        return self.id
+
+    __repr__ = to_string
+
+    @classmethod
+    def from_string(cls: Type["RoomID"], s: str) -> "RoomID":
+        # Sigil check.
+        if len(s) < 1 or s[0] != cls.SIGIL:
+            raise SynapseError(
+                400,
+                "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
+                Codes.INVALID_PARAM,
+            )
+
+        room_id_with_domain: Optional[RoomIdWithDomain] = None
+        if ":" in s:
+            room_id_with_domain = RoomIdWithDomain.from_string(s)
+        else:
+            # MSC4291 room IDs must be valid urlsafe unpadded base64.
+            val = s[1:]
+            if not ROOM_ID_PATTERN_DOMAINLESS.match(val):
+                raise SynapseError(
+                    400,
+                    "Expected %s string to be valid urlsafe unpadded base64 '%s'"
+                    % (cls.__name__, val),
+                    Codes.INVALID_PARAM,
+                )
+
+        return cls(id=s, room_id_with_domain=room_id_with_domain)
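# A quick illustration (hypothetical IDs) of the two accepted forms. The
# domainless form is exactly 43 characters of unpadded urlsafe base64, which is
# enough to encode a 32-byte hash, per MSC4291's event-ID-derived room IDs.
legacy = RoomID.from_string("!abcdefg:example.org")
assert legacy.get_domain() == "example.org"

msc4291 = RoomID.from_string("!" + "A" * 43)
assert msc4291.get_domain() is None
assert msc4291.to_string() == "!" + "A" * 43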


@attr.s(slots=True, frozen=True, repr=False)
class EventID(DomainSpecificString):
    """Structure representing an event ID which is namespaced to a homeserver.

@@ -84,9 +84,6 @@ class AbstractObservableDeferred(Generic[_T], metaclass=abc.ABCMeta):
        This returns a brand new deferred that is resolved when the underlying
        deferred is resolved. Interacting with the returned deferred does not
        affect the underlying deferred.
-
-       Note that the returned Deferred doesn't follow the Synapse logcontext rules -
-       you will probably want to `make_deferred_yieldable` it.
        """
        ...

@@ -100,11 +97,6 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]):

    Cancelling or otherwise resolving an observer will not affect the original
    ObservableDeferred.
-
-   NB that it does not attempt to do anything with logcontexts; in general
-   you should probably make_deferred_yieldable the deferreds
-   returned by `observe`, and ensure that the original deferred runs its
-   callbacks in the sentinel logcontext.
    """

    __slots__ = ["_deferred", "_observers", "_result"]
@@ -861,16 +853,12 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
    """Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`.

    Args:
-       deferred: The `Deferred` to protect against cancellation. Must not follow the
-           Synapse logcontext rules.
+       deferred: The `Deferred` to protect against cancellation.

    Returns:
        A new `Deferred`, which will contain the result of the original `Deferred`.
        The new `Deferred` will not propagate cancellation through to the original.
        When cancelled, the new `Deferred` will fail with a `CancelledError`.
-
-       The new `Deferred` will not follow the Synapse logcontext rules and should be
-       wrapped with `make_deferred_yieldable`.
    """
    new_deferred: "defer.Deferred[T]" = defer.Deferred()
    deferred.chainDeferred(new_deferred)
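# A minimal demonstration (reactorless toy usage) of the contract documented
# above: cancelling the wrapper fails only the wrapper, and cancellation does
# not propagate through to the original Deferred.
from twisted.internet import defer

original: "defer.Deferred[str]" = defer.Deferred()
wrapper = stop_cancellation(original)

wrapper.cancel()
wrapper.addErrback(lambda f: f.trap(defer.CancelledError))
assert original.called is False  # the original is untouched and still usable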

@@ -896,8 +884,7 @@ def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
    resolve with a `CancelledError` until the original awaitable resolves.

    Args:
-       deferred: The coroutine or `Deferred` to protect against cancellation. May
-           optionally follow the Synapse logcontext rules.
+       deferred: The coroutine or `Deferred` to protect against cancellation.

    Returns:
        A new `Deferred`, which will contain the result of the original coroutine or
@@ -906,10 +893,6 @@ def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:

        When cancelled, the new `Deferred` will wait until the original coroutine or
        `Deferred` resolves before failing with a `CancelledError`.
-
-       The new `Deferred` will follow the Synapse logcontext rules if `awaitable`
-       follows the Synapse logcontext rules. Otherwise the new `Deferred` should be
-       wrapped with `make_deferred_yieldable`.
    """

    # First, convert the awaitable into a `Deferred`.

@@ -295,9 +295,6 @@ class DeferredCache(Generic[KT, VT]):
        *original* `value`, (c) any future calls to `get()` will complete with the
        result from the *new* `value`.
-
-       It is expected that `value` does *not* follow the synapse logcontext rules - ie,
-       if it is incomplete, it runs its callbacks in the sentinel context.

        Args:
            key: Key to be set
            value: a deferred which will complete with a result to add to the cache

@@ -234,11 +234,9 @@ class ResponseCache(Generic[KV]):
    ) -> RV:
        """Wrap together a *get* and *set* call, taking care of logcontexts.

-       First looks up the key in the cache, and if it is present makes it
-       follow the synapse logcontext rules and returns it.
+       First looks up the key in the cache and, if present, returns it.

-       Otherwise, makes a call to *callback(*args, **kwargs)*, which should
-       follow the synapse logcontext rules, and adds the result to the cache.
+       Otherwise, makes a call to *callback(*args, **kwargs)* and adds the
+       result to the cache.

        Example usage:


@@ -1,250 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2023 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-# Originally licensed under the Apache License, Version 2.0:
-# <http://www.apache.org/licenses/LICENSE-2.0>.
-#
-# [This file includes modifications made by New Vector Limited]
-#
-#
-
-import functools
-import sys
-from types import GeneratorType
-from typing import Any, Callable, Generator, List, TypeVar, cast
-
-from typing_extensions import ParamSpec
-
-from twisted.internet import defer
-from twisted.internet.defer import Deferred
-from twisted.python.failure import Failure
-
-# Tracks if we've already patched inlineCallbacks
-_already_patched = False
-
-
-T = TypeVar("T")
-P = ParamSpec("P")
-
-
-def do_patch() -> None:
-    """
-    Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
-    """
-
-    from synapse.logging.context import current_context
-
-    global _already_patched
-
-    orig_inline_callbacks = defer.inlineCallbacks
-    if _already_patched:
-        return
-
-    def new_inline_callbacks(
-        f: Callable[P, Generator["Deferred[object]", object, T]],
-    ) -> Callable[P, "Deferred[T]"]:
-        @functools.wraps(f)
-        def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]":
-            start_context = current_context()
-            changes: List[str] = []
-            orig: Callable[P, "Deferred[T]"] = orig_inline_callbacks(
-                _check_yield_points(f, changes)
-            )
-
-            try:
-                res: "Deferred[T]" = orig(*args, **kwargs)
-            except Exception:
-                if current_context() != start_context:
-                    for err in changes:
-                        print(err, file=sys.stderr)
-
-                    err = "%s changed context from %s to %s on exception" % (
-                        f,
-                        start_context,
-                        current_context(),
-                    )
-                    print(err, file=sys.stderr)
-                    raise Exception(err)
-                raise
-
-            if not isinstance(res, Deferred) or res.called:
-                if current_context() != start_context:
-                    for err in changes:
-                        print(err, file=sys.stderr)
-
-                    err = "Completed %s changed context from %s to %s" % (
-                        f,
-                        start_context,
-                        current_context(),
-                    )
-                    # print the error to stderr because otherwise all we
-                    # see in travis-ci is the 500 error
-                    print(err, file=sys.stderr)
-                    raise Exception(err)
-                return res
-
-            if current_context():
-                err = (
-                    "%s returned incomplete deferred in non-sentinel context "
-                    "%s (start was %s)"
-                ) % (f, current_context(), start_context)
-                print(err, file=sys.stderr)
-                raise Exception(err)
-
-            def check_ctx(r: T) -> T:
-                if current_context() != start_context:
-                    for err in changes:
-                        print(err, file=sys.stderr)
-                    err = "%s completion of %s changed context from %s to %s" % (
-                        "Failure" if isinstance(r, Failure) else "Success",
-                        f,
-                        start_context,
-                        current_context(),
-                    )
-                    print(err, file=sys.stderr)
-                    raise Exception(err)
-                return r
-
-            res.addBoth(check_ctx)
-            return res
-
-        return wrapped
-
-    defer.inlineCallbacks = new_inline_callbacks
-    _already_patched = True
-
-
-def _check_yield_points(
-    f: Callable[P, Generator["Deferred[object]", object, T]],
-    changes: List[str],
-) -> Callable:
-    """Wraps a generator that is about to be passed to defer.inlineCallbacks,
-    checking that after every yield the log contexts are correct.
-
-    It's perfectly valid for log contexts to change within a function, e.g. due
-    to new Measure blocks, so such changes are added to the given `changes`
-    list instead of triggering an exception.
-
-    Args:
-        f: generator function to wrap
-        changes: A list of strings detailing how the contexts
-            changed within a function.
-
-    Returns:
-        function
-    """
-
-    from synapse.logging.context import current_context
-
-    @functools.wraps(f)
-    def check_yield_points_inner(
-        *args: P.args, **kwargs: P.kwargs
-    ) -> Generator["Deferred[object]", object, T]:
-        gen = f(*args, **kwargs)
-
-        # We only patch if we have a native generator function, as we rely on
-        # `gen.gi_frame`.
-        if not isinstance(gen, GeneratorType):
-            ret = yield from gen
-            return ret
-
-        last_yield_line_no = gen.gi_frame.f_lineno
-        result: Any = None
-        while True:
-            expected_context = current_context()
-
-            try:
-                isFailure = isinstance(result, Failure)
-                if isFailure:
-                    d = result.throwExceptionIntoGenerator(gen)
-                else:
-                    d = gen.send(result)
-            except StopIteration as e:
-                if current_context() != expected_context:
-                    # This happens when the context is lost sometime *after* the
-                    # final yield and returning. E.g. we forgot to yield on a
-                    # function that returns a deferred.
-                    #
-                    # We don't raise here as it's perfectly valid for contexts to
-                    # change in a function, as long as it sets the correct context
-                    # on resolving (which is checked separately).
-                    err = (
-                        "Function %r returned and changed context from %s to %s,"
-                        " in %s between %d and end of func"
-                        % (
-                            f.__qualname__,
-                            expected_context,
-                            current_context(),
-                            f.__code__.co_filename,
-                            last_yield_line_no,
-                        )
-                    )
-                    changes.append(err)
-                # The `StopIteration` contains the return value from the
-                # generator.
-                return cast(T, e.value)
-
-            frame = gen.gi_frame
-
-            if isinstance(d, defer.Deferred) and not d.called:
-                # This happens if we yield on a deferred that doesn't follow
-                # the log context rules without wrapping in a `make_deferred_yieldable`.
-                # We raise here as this should never happen.
-                if current_context():
-                    err = (
-                        "%s yielded with context %s rather than sentinel,"
-                        " yielded on line %d in %s"
-                        % (
-                            frame.f_code.co_name,
-                            current_context(),
-                            frame.f_lineno,
-                            frame.f_code.co_filename,
-                        )
-                    )
-                    raise Exception(err)
-
-            # The wrapped function yielded a Deferred: yield it back up to the
-            # parent inlineCallbacks().
-            try:
-                result = yield d
-            except Exception:
-                # This will fish an earlier Failure out of the stack where
-                # possible, and thus is preferable to passing in an exception to
-                # the Failure constructor, since it results in less stack-mangling.
-                result = Failure()
-
-            if current_context() != expected_context:
-                # This happens because the context is lost sometime *after* the
-                # previous yield and *after* the current yield. E.g. the
-                # deferred we waited on didn't follow the rules, or we forgot to
-                # yield on a function between the two yield points.
-                #
-                # We don't raise here as it's perfectly valid for contexts to
-                # change in a function, as long as it sets the correct context
-                # on resolving (which is checked separately).
-                err = (
-                    "%s changed context from %s to %s, happened between lines %d and %d in %s"
-                    % (
-                        frame.f_code.co_name,
-                        expected_context,
-                        current_context(),
-                        last_yield_line_no,
-                        frame.f_lineno,
-                        frame.f_code.co_filename,
-                    )
-                )
-                changes.append(err)
-
-            last_yield_line_no = frame.f_lineno
-
-    return check_yield_points_inner
@@ -131,22 +131,28 @@ def _get_counts_from_rate_limiter_instance(
|
||||
# We track the number of affected hosts per time-period so we can
|
||||
# differentiate one really noisy homeserver from a general
|
||||
# ratelimit tuning problem across the federation.
|
||||
LaterGauge(
|
||||
sleep_affected_hosts_gauge = LaterGauge(
|
||||
name="synapse_rate_limit_sleep_affected_hosts",
|
||||
desc="Number of hosts that had requests put to sleep",
|
||||
labelnames=["rate_limiter_name", SERVER_NAME_LABEL],
|
||||
caller=lambda: _get_counts_from_rate_limiter_instance(
|
||||
)
|
||||
sleep_affected_hosts_gauge.register_hook(
|
||||
homeserver_instance_id=None,
|
||||
hook=lambda: _get_counts_from_rate_limiter_instance(
|
||||
lambda rate_limiter_instance: sum(
|
||||
ratelimiter.should_sleep()
|
||||
for ratelimiter in rate_limiter_instance.ratelimiters.values()
|
||||
)
|
||||
),
|
||||
)
|
||||
LaterGauge(
|
||||
reject_affected_hosts_gauge = LaterGauge(
|
||||
name="synapse_rate_limit_reject_affected_hosts",
|
||||
desc="Number of hosts that had requests rejected",
|
||||
labelnames=["rate_limiter_name", SERVER_NAME_LABEL],
|
||||
caller=lambda: _get_counts_from_rate_limiter_instance(
|
||||
)
|
||||
reject_affected_hosts_gauge.register_hook(
|
||||
homeserver_instance_id=None,
|
||||
hook=lambda: _get_counts_from_rate_limiter_instance(
|
||||
lambda rate_limiter_instance: sum(
|
||||
ratelimiter.should_reject()
|
||||
for ratelimiter in rate_limiter_instance.ratelimiters.values()
|
||||
|
||||
@@ -44,6 +44,13 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
running_tasks_gauge = LaterGauge(
|
||||
name="synapse_scheduler_running_tasks",
|
||||
desc="The number of concurrent running tasks handled by the TaskScheduler",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
)
|
||||
|
||||
|
||||
class TaskScheduler:
|
||||
"""
|
||||
This is a simple task scheduler designed for resumable tasks. Normally,
|
||||
@@ -130,11 +137,9 @@ class TaskScheduler:
|
||||
TaskScheduler.SCHEDULE_INTERVAL_MS,
|
||||
)
|
||||
|
||||
LaterGauge(
|
||||
name="synapse_scheduler_running_tasks",
|
||||
desc="The number of concurrent running tasks handled by the TaskScheduler",
|
||||
labelnames=[SERVER_NAME_LABEL],
|
||||
caller=lambda: {(self.server_name,): len(self._running_tasks)},
|
||||
running_tasks_gauge.register_hook(
|
||||
homeserver_instance_id=hs.get_instance_id(),
|
||||
hook=lambda: {(self.server_name,): len(self._running_tasks)},
|
||||
)
|
||||
|
||||
def register_action(
|
||||
|
||||
@@ -21,9 +21,4 @@
|
||||
|
||||
from twisted.trial import util
|
||||
|
||||
from synapse.util.patch_inline_callbacks import do_patch
|
||||
|
||||
# attempt to do the patch before we load any synapse code
|
||||
do_patch()
|
||||
|
||||
util.DEFAULT_TIMEOUT_DURATION = 20
|
||||
|
||||
@@ -25,11 +25,11 @@ import time
|
||||
from http import HTTPStatus
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from io import BytesIO
|
||||
from typing import Any, Coroutine, Dict, Generator, Optional, TypeVar, Union
|
||||
from typing import Any, ClassVar, Coroutine, Dict, Generator, Optional, TypeVar, Union
|
||||
from unittest.mock import ANY, AsyncMock, Mock
|
||||
from urllib.parse import parse_qs
|
||||
|
||||
from parameterized import parameterized_class
|
||||
from parameterized.parameterized import parameterized_class
|
||||
from signedjson.key import (
|
||||
encode_verify_key_base64,
|
||||
generate_signing_key,
|
||||
@@ -46,7 +46,6 @@ from synapse.api.errors import (
|
||||
Codes,
|
||||
HttpResponseException,
|
||||
InvalidClientTokenError,
|
||||
OAuthInsufficientScopeError,
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.appservice import ApplicationService
|
||||
@@ -78,11 +77,7 @@ JWKS_URI = ISSUER + ".well-known/jwks.json"
|
||||
INTROSPECTION_ENDPOINT = ISSUER + "introspect"
|
||||
|
||||
SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*"
|
||||
MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest"
|
||||
MATRIX_DEVICE_SCOPE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
|
||||
DEVICE = "AABBCCDD"
|
||||
MATRIX_DEVICE_SCOPE = MATRIX_DEVICE_SCOPE_PREFIX + DEVICE
|
||||
SUBJECT = "abc-def-ghi"
|
||||
USERNAME = "test-user"
|
||||
USER_ID = "@" + USERNAME + ":" + SERVER_NAME
|
||||
@@ -112,7 +107,24 @@ async def get_json(url: str) -> JsonDict:
|
||||
|
||||
|
||||
@skip_unless(HAS_AUTHLIB, "requires authlib")
|
||||
@parameterized_class(
|
||||
("device_scope_prefix", "api_scope"),
|
||||
[
|
||||
("urn:matrix:client:device:", "urn:matrix:client:api:*"),
|
||||
(
|
||||
"urn:matrix:org.matrix.msc2967.client:device:",
|
||||
"urn:matrix:org.matrix.msc2967.client:api:*",
|
||||
),
|
||||
],
|
||||
)
|
||||
class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
device_scope_prefix: ClassVar[str]
|
||||
api_scope: ClassVar[str]
|
||||
|
||||
@property
|
||||
def device_scope(self) -> str:
|
||||
return self.device_scope_prefix + DEVICE
|
||||
|
||||
servlets = [
|
||||
account.register_servlets,
|
||||
keys.register_servlets,
|
||||
@@ -212,7 +224,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
"""The handler should return a 500 when no subject is present."""
|
||||
|
||||
self._set_introspection_returnvalue(
|
||||
{"active": True, "scope": " ".join([MATRIX_USER_SCOPE])}
|
||||
{"active": True, "scope": " ".join([self.api_scope])},
|
||||
)
|
||||
|
||||
request = Mock(args={})
|
||||
@@ -235,7 +247,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_DEVICE_SCOPE]),
|
||||
"scope": " ".join([self.device_scope]),
|
||||
}
|
||||
)
|
||||
request = Mock(args={})
|
||||
@@ -282,7 +294,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]),
|
||||
"scope": " ".join([SYNAPSE_ADMIN_SCOPE, self.api_scope]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
@@ -312,9 +324,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join(
|
||||
[SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE, MATRIX_GUEST_SCOPE]
|
||||
),
|
||||
"scope": " ".join([SYNAPSE_ADMIN_SCOPE, self.api_scope]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
@@ -344,7 +354,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_USER_SCOPE]),
|
||||
"scope": " ".join([self.api_scope]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
@@ -374,7 +384,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]),
|
||||
"scope": " ".join([self.api_scope, self.device_scope]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
@@ -404,7 +414,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_USER_SCOPE]),
|
||||
"scope": " ".join([self.api_scope]),
|
||||
"device_id": DEVICE,
|
||||
"username": USERNAME,
|
||||
}
|
||||
@@ -444,9 +454,9 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join(
|
||||
[
|
||||
MATRIX_USER_SCOPE,
|
||||
f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC",
|
||||
f"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF",
|
||||
self.api_scope,
|
||||
f"{self.device_scope_prefix}AABBCC",
|
||||
f"{self.device_scope_prefix}DDEEFF",
|
||||
]
|
||||
),
|
||||
"username": USERNAME,
|
||||
@@ -457,68 +467,6 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
|
||||
self.get_failure(self.auth.get_user_by_req(request), AuthError)
|
||||
|
||||
def test_active_guest_not_allowed(self) -> None:
|
||||
"""The handler should return an insufficient scope error."""
|
||||
|
||||
self._set_introspection_returnvalue(
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
request = Mock(args={})
|
||||
request.args[b"access_token"] = [b"mockAccessToken"]
|
||||
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
|
||||
error = self.get_failure(
|
||||
self.auth.get_user_by_req(request), OAuthInsufficientScopeError
|
||||
)
|
||||
self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
|
||||
self._rust_client.post.assert_called_once_with(
|
||||
url=INTROSPECTION_ENDPOINT,
|
||||
response_limit=ANY,
|
||||
request_body=ANY,
|
||||
headers=ANY,
|
||||
)
|
||||
self._assertParams()
|
||||
self.assertEqual(
|
||||
getattr(error.value, "headers", {})["WWW-Authenticate"],
|
||||
'Bearer error="insufficient_scope", scope="urn:matrix:org.matrix.msc2967.client:api:*"',
|
||||
)
|
||||
|
||||
def test_active_guest_allowed(self) -> None:
|
||||
"""The handler should return a requester with guest user rights and a device ID."""
|
||||
|
||||
self._set_introspection_returnvalue(
|
||||
{
|
||||
"active": True,
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]),
|
||||
"username": USERNAME,
|
||||
}
|
||||
)
|
||||
request = Mock(args={})
|
||||
request.args[b"access_token"] = [b"mockAccessToken"]
|
||||
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
|
||||
requester = self.get_success(
|
||||
self.auth.get_user_by_req(request, allow_guest=True)
|
||||
)
|
||||
self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
|
||||
self._rust_client.post.assert_called_once_with(
|
||||
url=INTROSPECTION_ENDPOINT,
|
||||
response_limit=ANY,
|
||||
request_body=ANY,
|
||||
headers=ANY,
|
||||
)
|
||||
self._assertParams()
|
||||
self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
|
||||
self.assertEqual(requester.is_guest, True)
|
||||
self.assertEqual(
|
||||
get_awaitable_result(self.auth.is_server_admin(requester)), False
|
||||
)
|
||||
self.assertEqual(requester.device_id, DEVICE)
|
||||
|
||||
def test_unavailable_introspection_endpoint(self) -> None:
|
||||
"""The handler should return an internal server error."""
|
||||
request = Mock(args={})
|
||||
@@ -562,8 +510,8 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
|
||||
"sub": SUBJECT,
|
||||
"scope": " ".join(
|
||||
[
|
||||
MATRIX_USER_SCOPE,
|
||||
f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC",
|
||||
self.api_scope,
|
||||
f"{self.device_scope_prefix}AABBCC",
|
||||
]
|
||||
),
|
||||
"username": USERNAME,
|
||||
@@ -611,7 +559,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
             {
                 "active": True,
                 "sub": SUBJECT,
-                "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]),
+                "scope": " ".join([self.api_scope, self.device_scope]),
                 "username": USERNAME,
             }
         )
@@ -676,7 +624,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
         return json.dumps(
             {
                 "active": True,
-                "scope": MATRIX_USER_SCOPE,
+                "scope": self.api_scope,
                 "sub": SUBJECT,
                 "username": USERNAME,
             },
@@ -842,8 +790,24 @@ class FakeMasServer(HTTPServer):
 T = TypeVar("T")


+@parameterized_class(
+    ("device_scope_prefix", "api_scope"),
+    [
+        ("urn:matrix:client:device:", "urn:matrix:client:api:*"),
+        (
+            "urn:matrix:org.matrix.msc2967.client:device:",
+            "urn:matrix:org.matrix.msc2967.client:api:*",
+        ),
+    ],
+)
 class MasAuthDelegation(HomeserverTestCase):
     server: FakeMasServer
+    device_scope_prefix: ClassVar[str]
+    api_scope: ClassVar[str]
+
+    @property
+    def device_scope(self) -> str:
+        return self.device_scope_prefix + DEVICE

     def till_deferred_has_result(
         self,
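Editorial aside (not part of the changeset): `parameterized_class`, from the `parameterized` package, re-runs every test in the decorated class once per tuple, binding each tuple entry to the named class attribute. A minimal self-contained sketch; `ScopeExample` and its assertions are hypothetical stand-ins, while the two prefixes are the stable/unstable values from the parameterization above.

# Hedged sketch: ScopeExample is illustrative only.
import unittest

from parameterized import parameterized_class


@parameterized_class(
    ("device_scope_prefix", "api_scope"),
    [
        ("urn:matrix:client:device:", "urn:matrix:client:api:*"),
        (
            "urn:matrix:org.matrix.msc2967.client:device:",
            "urn:matrix:org.matrix.msc2967.client:api:*",
        ),
    ],
)
class ScopeExample(unittest.TestCase):
    device_scope_prefix: str
    api_scope: str

    def test_scope_shape(self) -> None:
        # Runs twice: once with the stable prefix, once with the unstable one.
        self.assertTrue(self.device_scope_prefix.endswith(":device:"))
        self.assertTrue(self.api_scope.endswith(":api:*"))


if __name__ == "__main__":
    unittest.main()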
@@ -914,12 +878,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join(
-                [
-                    MATRIX_USER_SCOPE,
-                    f"{MATRIX_DEVICE_SCOPE_PREFIX}{DEVICE}",
-                ]
-            ),
+            "scope": " ".join([self.api_scope, self.device_scope]),
             "username": USERNAME,
             "expires_in": 60,
         }
@@ -943,12 +902,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join(
-                [
-                    MATRIX_USER_SCOPE,
-                    f"{MATRIX_DEVICE_SCOPE_PREFIX}{DEVICE}",
-                ]
-            ),
+            "scope": " ".join([self.api_scope, self.device_scope]),
             "username": USERNAME,
         }

@@ -971,12 +925,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join(
-                [
-                    MATRIX_USER_SCOPE,
-                    f"{MATRIX_DEVICE_SCOPE_PREFIX}ABCDEF",
-                ]
-            ),
+            "scope": " ".join([self.api_scope, f"{self.device_scope_prefix}ABCDEF"]),
             "username": USERNAME,
             "expires_in": 60,
         }
@@ -993,7 +942,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join([MATRIX_USER_SCOPE]),
+            "scope": " ".join([self.api_scope]),
             "username": "inexistent_user",
             "expires_in": 60,
         }
@@ -1039,7 +988,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": MATRIX_USER_SCOPE,
+            "scope": self.api_scope,
             "username": USERNAME,
             "expires_in": 60,
             "device_id": DEVICE,
@@ -1057,7 +1006,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]),
+            "scope": " ".join([SYNAPSE_ADMIN_SCOPE, self.api_scope]),
             "username": USERNAME,
             "expires_in": 60,
         }
@@ -1079,12 +1028,7 @@ class MasAuthDelegation(HomeserverTestCase):
         self.server.introspection_response = {
             "active": True,
             "sub": SUBJECT,
-            "scope": " ".join(
-                [
-                    MATRIX_USER_SCOPE,
-                    f"{MATRIX_DEVICE_SCOPE_PREFIX}{DEVICE}",
-                ]
-            ),
+            "scope": " ".join([self.api_scope, self.device_scope]),
             "username": USERNAME,
             "expires_in": 60,
         }

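Editorial aside (not part of the changeset): the introspection responses above all hinge on the shape of the `scope` string: one space-separated list holding the API grant plus zero or more device scopes. A hedged sketch of how such a string decomposes; `parse_scope` is a hypothetical helper, not Synapse's implementation, and the prefixes mirror the stable flavour from the parameterization.

# Hedged sketch: parse_scope is illustrative only.
from typing import List, Tuple

DEVICE_SCOPE_PREFIX = "urn:matrix:client:device:"
API_SCOPE = "urn:matrix:client:api:*"


def parse_scope(scope: str) -> Tuple[bool, List[str]]:
    tokens = scope.split(" ")
    has_api_grant = API_SCOPE in tokens
    device_ids = [
        t[len(DEVICE_SCOPE_PREFIX):]
        for t in tokens
        if t.startswith(DEVICE_SCOPE_PREFIX)
    ]
    return has_api_grant, device_ids


# e.g. the "two devices" case rejected by the tests above:
assert parse_scope(
    "urn:matrix:client:api:* "
    "urn:matrix:client:device:AABBCC "
    "urn:matrix:client:device:DDEEFF"
) == (True, ["AABBCC", "DDEEFF"])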
@@ -19,7 +19,7 @@
 #
 #

-from typing import Awaitable, cast
+from typing import Awaitable, Dict, cast

 from twisted.internet import defer
 from twisted.internet.testing import MemoryReactorClock
@@ -38,9 +38,11 @@ from synapse.logging.opentracing import (
 from synapse.util import Clock

 try:
-    from synapse.logging.scopecontextmanager import LogContextScopeManager
+    import opentracing
+    from opentracing.scope_managers.contextvars import ContextVarsScopeManager
 except ImportError:
-    LogContextScopeManager = None  # type: ignore
+    opentracing = None  # type: ignore
+    ContextVarsScopeManager = None  # type: ignore

 try:
     import jaeger_client
@@ -54,9 +56,10 @@ from tests.unittest import TestCase
 logger = logging.getLogger(__name__)


-class LogContextScopeManagerTestCase(TestCase):
+class TracingScopeTestCase(TestCase):
     """
-    Test logging contexts and active opentracing spans.
+    Test that our tracing machinery works well in a variety of situations (especially
+    with Twisted's runtime and deferreds).

     There's casts throughout this from generic opentracing objects (e.g.
     opentracing.Span) to the ones specific to Jaeger since they have additional
@@ -64,7 +67,7 @@ class LogContextScopeManagerTestCase(TestCase):
     opentracing backend is Jaeger.
     """

-    if LogContextScopeManager is None:
+    if opentracing is None:
         skip = "Requires opentracing"  # type: ignore[unreachable]
     if jaeger_client is None:
         skip = "Requires jaeger_client"  # type: ignore[unreachable]
@@ -74,7 +77,7 @@ class LogContextScopeManagerTestCase(TestCase):
     # global variables that power opentracing. We create our own tracer instance
     # and test with it.

-    scope_manager = LogContextScopeManager()
+    scope_manager = ContextVarsScopeManager()
     config = jaeger_client.config.Config(
         config={}, service_name="test", scope_manager=scope_manager
     )
@@ -208,6 +211,135 @@ class LogContextScopeManagerTestCase(TestCase):
             [scopes[1].span, scopes[2].span, scopes[0].span],
         )

+    def test_run_in_background_active_scope_still_available(self) -> None:
+        """
+        Test that tasks running via `run_in_background` still have access to the
+        active tracing scope.
+
+        This is a regression test for a previous Synapse issue where the tracing scope
+        would `__exit__` and close before the `run_in_background` task completed, and our
+        own previous custom `_LogContextScope.close(...)` would clear
+        `LoggingContext.scope`, preventing further tracing spans from having the correct
+        parent.
+        """
+        reactor = MemoryReactorClock()
+        clock = Clock(reactor)
+
+        scope_map: Dict[str, opentracing.Scope] = {}
+
+        async def async_task() -> None:
+            root_scope = scope_map["root"]
+            root_context = cast(jaeger_client.SpanContext, root_scope.span.context)
+
+            self.assertEqual(
+                self._tracer.active_span,
+                root_scope.span,
+                "expected to inherit the root tracing scope from where this was run",
+            )
+
+            # Return control back to the reactor thread and wait an arbitrary amount
+            await clock.sleep(4)
+
+            # This is a key part of what we're testing! In a previous version of
+            # Synapse, we would lose the active span at this point.
+            self.assertEqual(
+                self._tracer.active_span,
+                root_scope.span,
+                "expected to still have a root tracing scope/span active",
+            )
+
+            # For completeness' sake, let's also trace more sub-tasks here and assert
+            # they have the correct span parents as well (root)
+
+            # Start tracing some other sub-task.
+            #
+            # This is a key part of what we're testing! In a previous version of
+            # Synapse, it would have the incorrect span parents.
+            scope = start_active_span(
+                "task1",
+                tracer=self._tracer,
+            )
+            scope_map["task1"] = scope
+
+            # Ensure the span parent is pointing to the root scope
+            context = cast(jaeger_client.SpanContext, scope.span.context)
+            self.assertEqual(
+                context.parent_id,
+                root_context.span_id,
+                "expected task1 parent to be the root span",
+            )
+
+            # Ensure that the active span is our new sub-task now
+            self.assertEqual(self._tracer.active_span, scope.span)
+            # Return control back to the reactor thread and wait an arbitrary amount
+            await clock.sleep(4)
+            # We should still see the active span as the scope wasn't closed yet
+            self.assertEqual(self._tracer.active_span, scope.span)
+            scope.close()
+
+        async def root() -> None:
+            with start_active_span(
+                "root span",
+                tracer=self._tracer,
+                # We will close this off later. We're basically just mimicking the same
+                # pattern for how we handle requests. We pass the span off to the
+                # request for it to finish.
+                finish_on_close=False,
+            ) as root_scope:
+                scope_map["root"] = root_scope
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+
+                # Fire-and-forget a task
+                #
+                # XXX: The root scope context manager will `__exit__` before this task
+                # completes.
+                run_in_background(async_task)
+
+                # Because we used `run_in_background`, the active span should still be
+                # the root.
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+
+            # We shouldn't see any active spans outside of the scope
+            self.assertIsNone(self._tracer.active_span)
+
+        with LoggingContext("root context"):
+            # Start the test off
+            d_root = defer.ensureDeferred(root())
+
+            # Let the tasks complete
+            reactor.pump((2,) * 8)
+            self.successResultOf(d_root)
+
+            # After we see all of the tasks are done (like a request when it
+            # `_finished_processing`), let's finish our root span
+            scope_map["root"].span.finish()
+
+            # Sanity check again: we shouldn't see any active spans leftover in
+            # this context.
+            self.assertIsNone(self._tracer.active_span)
+
+            # The spans should be reported in order of their finishing: task1, then
+            # root.
+            #
+            # We use `assertIncludes` just as an easier way to see if items are missing or
+            # added. We assert the order just below
+            self.assertIncludes(
+                set(self._reporter.get_spans()),
+                {
+                    scope_map["task1"].span,
+                    scope_map["root"].span,
+                },
+                exact=True,
+            )
+            # This is where we actually assert the correct order
+            self.assertEqual(
+                self._reporter.get_spans(),
+                [
+                    scope_map["task1"].span,
+                    scope_map["root"].span,
+                ],
+            )
+
     def test_trace_decorator_sync(self) -> None:
         """
         Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`

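Editorial aside (not part of the changeset): background on the `ContextVarsScopeManager` switch above. A contextvar snapshot is captured when a task is spawned, so the active scope set by the parent stays visible inside a fire-and-forget child without any custom `close()` bookkeeping. A hedged sketch of that principle using plain `asyncio`/`contextvars`; the names are illustrative, not Synapse or Twisted code.

# Hedged sketch: demonstrates the contextvar propagation that
# ContextVarsScopeManager relies on.
import asyncio
import contextvars

active_scope: contextvars.ContextVar[str] = contextvars.ContextVar(
    "active_scope", default="<none>"
)


async def fire_and_forget_task() -> None:
    # The child task sees the value the parent set, even though the
    # parent may have moved on by the time this runs.
    assert active_scope.get() == "root"


async def main() -> None:
    active_scope.set("root")
    # create_task copies the current context, much like run_in_background
    # forking off work under the current logging/tracing context.
    task = asyncio.create_task(fire_and_forget_task())
    await task


asyncio.run(main())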
@@ -18,11 +18,18 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import Dict, Protocol, Tuple
+from typing import Dict, NoReturn, Protocol, Tuple

 from prometheus_client.core import Sample

-from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
+from synapse.metrics import (
+    REGISTRY,
+    SERVER_NAME_LABEL,
+    InFlightGauge,
+    LaterGauge,
+    all_later_gauges_to_clean_up_on_shutdown,
+    generate_latest,
+)
 from synapse.util.caches.deferred_cache import DeferredCache

 from tests import unittest
@@ -285,6 +292,95 @@ class CacheMetricsTests(unittest.HomeserverTestCase):
         self.assertEqual(hs2_cache_max_size_metric_value, "777.0")


+class LaterGaugeTests(unittest.HomeserverTestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        self.later_gauge = LaterGauge(
+            name="foo",
+            desc="",
+            labelnames=[SERVER_NAME_LABEL],
+        )
+
+    def tearDown(self) -> None:
+        super().tearDown()
+
+        REGISTRY.unregister(self.later_gauge)
+        all_later_gauges_to_clean_up_on_shutdown.pop(self.later_gauge.name, None)
+
+    def test_later_gauge_multiple_servers(self) -> None:
+        """
+        Test that LaterGauge metrics are reported correctly across multiple servers. We
+        will have a metrics entry for each homeserver that is labeled with the
+        `server_name` label.
+        """
+        self.later_gauge.register_hook(
+            homeserver_instance_id="123", hook=lambda: {("hs1",): 1}
+        )
+        self.later_gauge.register_hook(
+            homeserver_instance_id="456", hook=lambda: {("hs2",): 2}
+        )
+
+        metrics_map = get_latest_metrics()
+
+        # Find the metrics from both homeservers
+        hs1_metric = 'foo{server_name="hs1"}'
+        hs1_metric_value = metrics_map.get(hs1_metric)
+        self.assertIsNotNone(
+            hs1_metric_value,
+            f"Missing metric {hs1_metric} in metrics {metrics_map}",
+        )
+        self.assertEqual(hs1_metric_value, "1.0")
+
+        hs2_metric = 'foo{server_name="hs2"}'
+        hs2_metric_value = metrics_map.get(hs2_metric)
+        self.assertIsNotNone(
+            hs2_metric_value,
+            f"Missing metric {hs2_metric} in metrics {metrics_map}",
+        )
+        self.assertEqual(hs2_metric_value, "2.0")
+
+    def test_later_gauge_hook_exception(self) -> None:
+        """
+        Test that LaterGauge metrics are collected across multiple servers even if one
+        hook is throwing an exception.
+        """
+
+        def raise_exception() -> NoReturn:
+            raise Exception("fake error generating data")
+
+        # Make the hook for hs1 throw an exception
+        self.later_gauge.register_hook(
+            homeserver_instance_id="123", hook=raise_exception
+        )
+        # Metrics from hs2 still work fine
+        self.later_gauge.register_hook(
+            homeserver_instance_id="456", hook=lambda: {("hs2",): 2}
+        )
+
+        metrics_map = get_latest_metrics()
+
+        # Since we encountered an exception while trying to collect metrics from hs1, we
+        # don't expect to see it here.
+        hs1_metric = 'foo{server_name="hs1"}'
+        hs1_metric_value = metrics_map.get(hs1_metric)
+        self.assertIsNone(
+            hs1_metric_value,
+            (
+                "Since we encountered an exception while trying to collect metrics from hs1, "
+                f"we don't expect to see it in the metrics_map {metrics_map}"
+            ),
+        )
+
+        # We should still see metrics from hs2 though
+        hs2_metric = 'foo{server_name="hs2"}'
+        hs2_metric_value = metrics_map.get(hs2_metric)
+        self.assertIsNotNone(
+            hs2_metric_value,
+            f"Missing metric {hs2_metric} in cache metrics {metrics_map}",
+        )
+        self.assertEqual(hs2_metric_value, "2.0")
+
+
 def get_latest_metrics() -> Dict[str, str]:
     """
     Collect the latest metrics from the registry and parse them into an easy to use map.

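Editorial aside (not part of the changeset): the underlying Prometheus pattern in the `LaterGauge` tests above is a collector whose hooks are evaluated at scrape time, with a failing hook skipped so the other labelled series still report. A hedged sketch with plain `prometheus_client`; `CallbackGauge` is a hypothetical stand-in for Synapse's `LaterGauge`, not its actual implementation.

# Hedged sketch: CallbackGauge is illustrative only.
from prometheus_client import CollectorRegistry, generate_latest
from prometheus_client.core import GaugeMetricFamily


class CallbackGauge:
    def __init__(self) -> None:
        # server_name -> zero-arg hook returning the current value
        self.hooks = {"hs1": lambda: 1.0, "hs2": lambda: 2.0}

    def collect(self):
        gauge = GaugeMetricFamily("foo", "example gauge", labels=["server_name"])
        for server_name, hook in sorted(self.hooks.items()):
            try:
                gauge.add_metric([server_name], hook())
            except Exception:
                # A broken hook only drops its own series; the rest of the
                # scrape carries on (which is what the test asserts).
                continue
        yield gauge


registry = CollectorRegistry()
registry.register(CallbackGauge())  # duck-typed: only collect() is required
print(generate_latest(registry).decode())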
@@ -32,7 +32,6 @@ from synapse.config.workers import InstanceTcpLocationConfig, InstanceUnixLocati
 from synapse.http.site import SynapseRequest, SynapseSite
 from synapse.replication.http import ReplicationRestResource
 from synapse.replication.tcp.client import ReplicationDataHandler
-from synapse.replication.tcp.handler import ReplicationCommandHandler
 from synapse.replication.tcp.protocol import (
     ClientReplicationStreamProtocol,
     ServerReplicationStreamProtocol,
@@ -97,7 +96,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         self.test_handler = self._build_replication_data_handler()
         self.worker_hs._replication_data_handler = self.test_handler  # type: ignore[attr-defined]

-        repl_handler = ReplicationCommandHandler(self.worker_hs)
+        repl_handler = self.worker_hs.get_replication_command_handler()
         self.client = ClientReplicationStreamProtocol(
             self.worker_hs,
             "client",

@@ -33,12 +33,17 @@ from twisted.internet.testing import MemoryReactor
 import synapse.rest.admin
 from synapse.api.constants import EventTypes, Membership, RoomTypes
 from synapse.api.errors import Codes
+from synapse.api.room_versions import RoomVersions
 from synapse.handlers.pagination import (
     PURGE_ROOM_ACTION_NAME,
     SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
 )
 from synapse.rest.client import directory, events, knock, login, room, sync
 from synapse.server import HomeServer
+from synapse.storage.databases.main.purge_events import (
+    purge_room_tables_with_event_id_index,
+    purge_room_tables_with_room_id_column,
+)
 from synapse.types import UserID
 from synapse.util import Clock
 from synapse.util.task_scheduler import TaskScheduler
@@ -546,7 +551,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):

     def _is_purged(self, room_id: str) -> None:
         """Test that the following tables have been purged of all rows related to the room."""
-        for table in PURGE_TABLES:
+        for table in purge_room_tables_with_room_id_column:
             count = self.get_success(
                 self.store.db_pool.simple_select_one_onecol(
                     table=table,
@@ -555,7 +560,21 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
                     desc="test_purge_room",
                 )
             )
             self.assertEqual(count, 0, msg=f"Rows not purged in {table}")

+        for table in purge_room_tables_with_event_id_index:
+            rows = self.get_success(
+                self.store.db_pool.execute(
+                    "find_event_count_for_table",
+                    f"""
+                    SELECT COUNT(*) FROM {table} WHERE event_id IN (
+                        SELECT event_id FROM events WHERE room_id=?
+                    )
+                    """,
+                    room_id,
+                )
+            )
+            count = rows[0][0]
+            self.assertEqual(count, 0, msg=f"Rows not purged in {table}")

     def _assert_peek(self, room_id: str, expect_code: int) -> None:
@@ -1228,7 +1247,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):

     def _is_purged(self, room_id: str) -> None:
         """Test that the following tables have been purged of all rows related to the room."""
-        for table in PURGE_TABLES:
+        for table in purge_room_tables_with_room_id_column:
             count = self.get_success(
                 self.store.db_pool.simple_select_one_onecol(
                     table=table,
@@ -1237,7 +1256,21 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
                     desc="test_purge_room",
                 )
             )
             self.assertEqual(count, 0, msg=f"Rows not purged in {table}")

+        for table in purge_room_tables_with_event_id_index:
+            rows = self.get_success(
+                self.store.db_pool.execute(
+                    "find_event_count_for_table",
+                    f"""
+                    SELECT COUNT(*) FROM {table} WHERE event_id IN (
+                        SELECT event_id FROM events WHERE room_id=?
+                    )
+                    """,
+                    room_id,
+                )
+            )
+            count = rows[0][0]
+            self.assertEqual(count, 0, msg=f"Rows not purged in {table}")

     def _assert_peek(self, room_id: str, expect_code: int) -> None:
@@ -2892,6 +2925,63 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
             "No local admin user in room with power to update power levels.",
         )

+    def test_v12_room(self) -> None:
+        """Test that you can be promoted to admin in v12 rooms, which won't have the admin in the PL event."""
+        room_id = self.helper.create_room_as(
+            self.creator,
+            tok=self.creator_tok,
+            room_version=RoomVersions.V12.identifier,
+        )
+
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
+            content={},
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        # Now we test that we can join the room and that the admin user has PL 100.
+        self.helper.join(room_id, self.admin_user, tok=self.admin_user_tok)
+        pl = self.helper.get_state(
+            room_id, EventTypes.PowerLevels, tok=self.creator_tok
+        )
+        self.assertEquals(pl["users"][self.admin_user], 100)
+
+    def test_v12_room_with_many_user_pls(self) -> None:
+        """Test that you can be promoted to the admin user's PL in v12 rooms that contain a range of user PLs."""
+        room_id = self.helper.create_room_as(
+            self.creator,
+            tok=self.creator_tok,
+            room_version=RoomVersions.V12.identifier,
+            is_public=True,
+            extra_content={
+                "power_level_content_override": {
+                    "users": {
+                        self.second_user_id: 50,
+                    },
+                },
+            },
+        )
+
+        self.helper.join(room_id, self.admin_user, tok=self.admin_user_tok)
+        self.helper.join(room_id, self.second_user_id, tok=self.second_tok)
+
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
+            content={},
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        pl = self.helper.get_state(
+            room_id, EventTypes.PowerLevels, tok=self.creator_tok
+        )
+        self.assertEquals(pl["users"][self.admin_user], 100)
+

 class BlockRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
@@ -3119,35 +3209,3 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
         """Block a room in database"""
         self.get_success(self._store.block_room(room_id, self.other_user))
         self._is_blocked(room_id, expect=True)
-
-
-PURGE_TABLES = [
-    "current_state_events",
-    "event_backward_extremities",
-    "event_forward_extremities",
-    "event_json",
-    "event_push_actions",
-    "event_search",
-    "events",
-    "receipts_graph",
-    "receipts_linearized",
-    "room_aliases",
-    "room_depth",
-    "room_memberships",
-    "room_stats_state",
-    "room_stats_current",
-    "room_stats_earliest_token",
-    "rooms",
-    "stream_ordering_to_exterm",
-    "users_in_public_rooms",
-    "users_who_share_private_rooms",
-    "appservice_room_list",
-    "e2e_room_keys",
-    "event_push_summary",
-    "pusher_throttle",
-    "room_account_data",
-    "room_tags",
-    "state_groups",
-    "state_groups_state",
-    "federation_inbound_events_staging",
-]

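Editorial aside (not part of the changeset): a note on the `_is_purged` split above. Tables carrying a `room_id` column can be checked with a straight count, while tables keyed only by `event_id` are scoped to the room through the `events` table. A hedged, self-contained sketch of both checks against an in-memory SQLite database; the schema is a toy reduction, not Synapse's.

# Hedged sketch: toy schema; the real tests run these checks via the
# homeserver's database pool.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE events (event_id TEXT, room_id TEXT);
    CREATE TABLE event_json (event_id TEXT, json TEXT);
    """
)


def assert_purged(room_id: str) -> None:
    # Table with a room_id column: a simple count suffices.
    (count,) = conn.execute(
        "SELECT COUNT(*) FROM events WHERE room_id = ?", (room_id,)
    ).fetchone()
    assert count == 0, "rows not purged in events"

    # Table keyed only by event_id: join through events to scope the check.
    (count,) = conn.execute(
        """
        SELECT COUNT(*) FROM event_json WHERE event_id IN (
            SELECT event_id FROM events WHERE room_id = ?
        )
        """,
        (room_id,),
    ).fetchone()
    assert count == 0, "rows not purged in event_json"


assert_purged("!room:example.org")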
@@ -18,8 +18,11 @@
 # [This file includes modifications made by New Vector Limited]
 #
 from http import HTTPStatus
+from typing import ClassVar
 from unittest.mock import AsyncMock

+from parameterized import parameterized_class
+
 from synapse.rest.client import auth_metadata

 from tests.unittest import HomeserverTestCase, override_config, skip_unless
@@ -85,17 +88,22 @@ class AuthIssuerTestCase(HomeserverTestCase):
         req_mock.assert_not_called()


+@parameterized_class(
+    ("endpoint",),
+    [
+        ("/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",),
+        ("/_matrix/client/v1/auth_metadata",),
+    ],
+)
 class AuthMetadataTestCase(HomeserverTestCase):
+    endpoint: ClassVar[str]
     servlets = [
         auth_metadata.register_servlets,
     ]

     def test_returns_404_when_msc3861_disabled(self) -> None:
         # Make an unauthenticated request for the discovery info.
-        channel = self.make_request(
-            "GET",
-            "/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
-        )
+        channel = self.make_request("GET", self.endpoint)
         self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)

     @skip_unless(HAS_AUTHLIB, "requires authlib")
@@ -124,10 +132,7 @@ class AuthMetadataTestCase(HomeserverTestCase):
         )
         self.hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]

-        channel = self.make_request(
-            "GET",
-            "/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
-        )
+        channel = self.make_request("GET", self.endpoint)

         self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(
Some files were not shown because too many files have changed in this diff.