Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-09 01:30:18 +00:00)

Compare commits: anoa/fix_d ... v1.107.0rc (209 commits)
| SHA1 |
|---|
| 522a40c4de |
| dcd03d3b15 |
| 438bc23560 |
| cf30cfe5d1 |
| 1726b49457 |
| 792cfe7ba6 |
| c3682ff668 |
| 3e6ee8ff88 |
| 7c9ac01eb5 |
| 3818597751 |
| 3aadf43122 |
| 5b6a75935e |
| c0ea2bf800 |
| 37558d5e4c |
| 0b358f8643 |
| 7254015665 |
| e84a493f41 |
| 07232e27a8 |
| e26673fe97 |
| 7ab0f630da |
| b548f7803a |
| 758aec6b34 |
| c897ac63e9 |
| 38bc7a009d |
| 6a275828c8 |
| 6e373468a4 |
| 48ee17dc79 |
| f6437ca1c4 |
| 02bda250f8 |
| 0fd6b269d3 |
| 89fc579329 |
| 9c91873922 |
| 41fbe387d6 |
| 90cc9e5b29 |
| 516fd891ee |
| 0ef2315a99 |
| 59710437e4 |
| 9985aa6821 |
| 31742149d4 |
| 947e8a6cb0 |
| 0d4d00a07c |
| 3166445514 |
| 922656fc77 |
| 30c50e0240 |
| 48a90c697b |
| 47773232b0 |
| 2e92b718d5 |
| 646cb6ff24 |
| 0fe9e1f7da |
| ae181233aa |
| 20c9e19519 |
| 55b0aa847a |
| 074ef4d75f |
| 301c9771c4 |
| 800a5b6ef3 |
| 8c667759ad |
| 14e9ab19be |
| 20c8991a94 |
| dcae2b4ba4 |
| 98f57ea3f2 |
| f5b6005559 |
| 47f3870894 |
| 6d64f1b2b8 |
| 1d47532310 |
| 09f0957b36 |
| 803f05f60c |
| c8e0bed426 |
| 28f5ad07d3 |
| f0d6f14047 |
| 3a196b3227 |
| fbb2573525 |
| 259442fa4c |
| fe4719a268 |
| 3a30846bd0 |
| db4e321219 |
| 657b8cc75c |
| a2a543fd12 |
| 89f1092284 |
| 4ffed6330f |
| e363881592 |
| d40878451c |
| 892cbd0624 |
| 106cfd4b39 |
| 0a6ae6fe4c |
| 13a3987929 |
| 680f60102b |
| 3e51b370c5 |
| 9b8597e431 |
| 4d10a8fb18 |
| 1f8f991d51 |
| 5360baeb64 |
| 0e68e9b7f4 |
| 230b709d9d |
| 05957ac70f |
| 31122b71bc |
| 51776745b9 |
| ca27b51665 |
| ec174d0470 |
| fd48fc4585 |
| ea6bfae0fc |
| 59ceabcb97 |
| 0581741342 |
| 34878b6bc9 |
| c900d18647 |
| 03f0d746c3 |
| b5322b4daf |
| b7af076ab5 |
| 9ad49e7ecf |
| f7a3ebe44d |
| bef765b262 |
| 6d3ffdd421 |
| 3ab9e6d524 |
| db95b75515 |
| 4c98aad47b |
| 5a59c68b3d |
| 6cf23febb9 |
| 159536d525 |
| 70a86f69c2 |
| 21daa56ee1 |
| cf5adc80e1 |
| 8fb5b0f335 |
| 77b824008c |
| 77317cecc7 |
| 3e89afdef7 |
| f768e028c1 |
| 74ab329eaa |
| 05489d89c6 |
| 9635822cc1 |
| 42fa47a2a4 |
| 0b4dc4de7c |
| 52f456a822 |
| 6d5bafb2c8 |
| 1198f649ea |
| acc2f00eca |
| 1c1b0bfa77 |
| cb562d73aa |
| a111ba0207 |
| 1cc1d6b655 |
| 92f2069627 |
| 9b5eef95ad |
| e161103b46 |
| f4e12ceb1f |
| 10e56b162f |
| 74fb3e1996 |
| a91fb6cc06 |
| 6cb8839f67 |
| 1e68b56a62 |
| 2bdf6280f6 |
| 5c0b87ff95 |
| 0d44f64c4e |
| 1f88790764 |
| 9d7880c0c6 |
| 48f59d3806 |
| 696cc9e802 |
| 4af33015af |
| 2d1bb0b06b |
| ab80b3412e |
| 1dee1b72ec |
| 571ca0c004 |
| 8a05304222 |
| 274f289a52 |
| 8de3283ebe |
| 4ad70f115b |
| 3778cea776 |
| 5ce9498047 |
| f2c5f1564e |
| 91694907da |
| e0b19a4777 |
| 0c55c76da8 |
| 3eb0a3b468 |
| 7c1c011942 |
| 7856ec96ef |
| cdbbf3653d |
| c51a2240d1 |
| e5dfb6ecbf |
| 1b7304c8b4 |
| 0621e8eb0e |
| bc1db16086 |
| 7b4d7429f8 |
| 01910b981f |
| 2252bae3df |
| 79e31e8527 |
| b07617fbe8 |
| e7cdf6152b |
| c415b7a412 |
| ea1b30940e |
| bfa93d1d3b |
| 02a147039c |
| 7c805f00a7 |
| 71ca199165 |
| 871f51c270 |
| 71e8634069 |
| 93edd0932e |
| 505cdd044b |
| d2674bacdb |
| afd513fb25 |
| 53744c7258 |
| bdcad7823f |
| 6f3f9770dc |
| 50a332cf30 |
| 6e714a6277 |
| 0c1c56b75c |
| b720059d75 |
| fbf7fa986f |
| ab9d3c0f40 |
| 8822ea88a3 |
| d24d115706 |
| 3ba984d7af |
| 4a5ea43f1b |
.github/workflows/docker.yml (2 changes, vendored)

@@ -30,7 +30,7 @@ jobs:
         run: docker buildx inspect
 
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.3.0
+        uses: sigstore/cosign-installer@v3.5.0
 
       - name: Checkout repository
         uses: actions/checkout@v4
.github/workflows/docs-pr-netlify.yaml (2 changes, vendored)

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d # v3.0.0
+        uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
.github/workflows/docs-pr.yaml (4 changes, vendored)

@@ -19,7 +19,7 @@ jobs:
           fetch-depth: 0
 
       - name: Setup mdbook
-        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+        uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
         with:
           mdbook-version: '0.4.17'
 
@@ -53,7 +53,7 @@ jobs:
       - uses: actions/checkout@v4
 
       - name: Setup mdbook
-        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+        uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
         with:
           mdbook-version: '0.4.17'
.github/workflows/docs.yaml (34 changes, vendored)

@@ -56,7 +56,7 @@ jobs:
           fetch-depth: 0
 
       - name: Setup mdbook
-        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+        uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
         with:
           mdbook-version: '0.4.17'
 
@@ -80,38 +80,8 @@ jobs:
 
       # Deploy to the target directory.
       - name: Deploy to gh pages
-        uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
+        uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./book
           destination_dir: ./${{ needs.pre.outputs.branch-version }}
-
-  ################################################################################
-  pages-devdocs:
-    name: GitHub Pages (developer docs)
-    runs-on: ubuntu-latest
-    needs:
-      - pre
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: "Set up Sphinx"
-        uses: matrix-org/setup-python-poetry@v1
-        with:
-          python-version: "3.x"
-          poetry-version: "1.3.2"
-          groups: "dev-docs"
-          extras: ""
-
-      - name: Build the documentation
-        run: |
-          cd dev-docs
-          poetry run make html
-
-      # Deploy to the target directory.
-      - name: Deploy to gh pages
-        uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./dev-docs/_build/html
-          destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
.github/workflows/latest_deps.yml (2 changes, vendored)

@@ -226,7 +226,7 @@ jobs:
 
     steps:
       - uses: actions/checkout@v4
-      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
+      - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
.github/workflows/tests.yml (20 changes, vendored)

@@ -22,7 +22,7 @@ jobs:
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
     steps:
-      - uses: dorny/paths-filter@v2
+      - uses: dorny/paths-filter@v3
         id: filter
         # We only check on PRs
         if: startsWith(github.ref, 'refs/pull/')
@@ -81,7 +81,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -148,7 +148,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       - name: Setup Poetry
@@ -208,7 +208,7 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -225,7 +225,7 @@ jobs:
       - uses: actions/checkout@v4
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
         with:
           components: clippy
       - uses: Swatinem/rust-cache@v2
@@ -344,7 +344,7 @@ jobs:
           postgres:${{ matrix.job.postgres-version }}
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       - uses: matrix-org/setup-python-poetry@v1
@@ -386,7 +386,7 @@ jobs:
       - uses: actions/checkout@v4
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       # There aren't wheels for some of the older deps, so we need to install
@@ -498,7 +498,7 @@ jobs:
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       - name: Run SyTest
@@ -642,7 +642,7 @@ jobs:
         path: synapse
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       - name: Prepare Complement's Prerequisites
@@ -674,7 +674,7 @@ jobs:
       - uses: actions/checkout@v4
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.65.0
+        uses: dtolnay/rust-toolchain@1.66.0
       - uses: Swatinem/rust-cache@v2
 
       - run: cargo test
.github/workflows/twisted_trunk.yml (2 changes, vendored)

@@ -207,7 +207,7 @@ jobs:
 
     steps:
       - uses: actions/checkout@v4
-      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
+      - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
CHANGES.md (351 changes)

@@ -1,3 +1,354 @@
# Synapse 1.107.0rc1 (2024-05-07)

### Features

- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051))
- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082))
- Add support for [MSC4115: membership metadata on events](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))

### Bugfixes

- Fixed the search feature of Element Android on homeservers using SQLite by returning search terms as search highlights. ([\#17000](https://github.com/element-hq/synapse/issues/17000))
- Fix a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter the returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077))
- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`; the existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated (see the request sketch after this list). ([\#17078](https://github.com/element-hq/synapse/issues/17078))
- Apply the user's email & picture during OIDC registration if present & selected. ([\#17120](https://github.com/element-hq/synapse/issues/17120))
- Improve the error message for cross-signing reset with [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused issues between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127))
- Fix a bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152))
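As a rough illustration of the MSC3266 endpoint move above, a client would now query the recommended path directly. This is a hedged sketch, not code from this release: the homeserver URL, room alias, and access token are placeholders.

```python
import urllib.parse

import requests

HOMESERVER = "https://matrix.example.org"  # placeholder homeserver
ROOM = "#someroom:example.org"             # placeholder room alias

# Recommended endpoint: .../im.nheko.summary/summary/{roomIdOrAlias};
# the older .../rooms/{roomIdOrAlias}/summary form is deprecated.
url = (
    f"{HOMESERVER}/_matrix/client/unstable/im.nheko.summary/summary/"
    + urllib.parse.quote(ROOM, safe="")
)
resp = requests.get(url, headers={"Authorization": "Bearer <access_token>"})
print(resp.status_code, resp.json())
```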
### Updates to the Docker image

- Correct licensing metadata on Docker image. ([\#17141](https://github.com/element-hq/synapse/issues/17141))

### Improved Documentation

- Update the `event_cache_size` and `global_factor` configuration options' documentation. ([\#17071](https://github.com/element-hq/synapse/issues/17071))
- Remove broken sphinx docs. ([\#17073](https://github.com/element-hq/synapse/issues/17073), [\#17148](https://github.com/element-hq/synapse/issues/17148))
- Add RuntimeDirectory to example matrix-synapse.service systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084))
- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114))
- Update `enable_notifs` configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116))
- Update the Upgrade Notes with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))

### Internal Changes

- Enable [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
- Add optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130))

### Updates to locked dependencies

* Bump furo from 2024.1.29 to 2024.4.27. ([\#17133](https://github.com/element-hq/synapse/issues/17133))
* Bump idna from 3.6 to 3.7. ([\#17136](https://github.com/element-hq/synapse/issues/17136))
* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157))
* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158))
* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106))
* Bump pillow from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107))
* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160))
* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109))
* Bump serde from 1.0.197 to 1.0.198. ([\#17111](https://github.com/element-hq/synapse/issues/17111))
* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132))
* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161))
* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112))
* Update `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135))
* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110))
* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159))
* Bump types-setuptools from 69.0.0.20240125 to 69.5.0.20240423. ([\#17134](https://github.com/element-hq/synapse/issues/17134))
# Synapse 1.106.0 (2024-04-30)

No significant changes since 1.106.0rc1.


# Synapse 1.106.0rc1 (2024-04-25)

### Features

- Send an email if the address is already bound to a user account. ([\#16819](https://github.com/element-hq/synapse/issues/16819))
- Implement the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108). ([\#17056](https://github.com/element-hq/synapse/issues/17056))
- Support delegating the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108) to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086))

### Bugfixes

- Add validation to ensure that the `limit` parameter on `/publicRooms` is non-negative (see the sketch after this list). ([\#16920](https://github.com/element-hq/synapse/issues/16920))
- Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error. ([\#16923](https://github.com/element-hq/synapse/issues/16923))
- Make the CSAPI endpoint `/keys/device_signing/upload` idempotent. ([\#16943](https://github.com/element-hq/synapse/issues/16943))
- Redact membership events if the user requested erasure upon deactivating. ([\#17076](https://github.com/element-hq/synapse/issues/17076))
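A minimal sketch of the `limit` validation in the first bugfix above, assuming a placeholder homeserver: a negative `limit` should now be rejected with a 400 error rather than triggering an internal server error.

```python
import requests

# /publicRooms is the standard room directory endpoint; the homeserver is a placeholder.
resp = requests.get(
    "https://matrix.example.org/_matrix/client/v3/publicRooms",
    params={"limit": -1},  # negative limits are now rejected up front
)
# Expect HTTP 400 with a Matrix error body instead of a 500.
print(resp.status_code, resp.json())
```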
### Improved Documentation

- Add a prompt in the contributing guide to manually configure icu4c. ([\#17069](https://github.com/element-hq/synapse/issues/17069))
- Clarify what part of message retention is still experimental. ([\#17099](https://github.com/element-hq/synapse/issues/17099))

### Internal Changes

- Use new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar). ([\#17032](https://github.com/element-hq/synapse/issues/17032), [\#17096](https://github.com/element-hq/synapse/issues/17096))
- Fix mypy with latest Twisted release. ([\#17036](https://github.com/element-hq/synapse/issues/17036))
- Bump minimum supported Rust version to 1.66.0. ([\#17079](https://github.com/element-hq/synapse/issues/17079))
- Add helpers to transform Twisted requests to Rust http Requests/Responses. ([\#17081](https://github.com/element-hq/synapse/issues/17081))
- Fix type annotation for `visited_chains` after `mypy` upgrade. ([\#17125](https://github.com/element-hq/synapse/issues/17125))

### Updates to locked dependencies

* Bump anyhow from 1.0.81 to 1.0.82. ([\#17095](https://github.com/element-hq/synapse/issues/17095))
* Bump peaceiris/actions-gh-pages from 3.9.3 to 4.0.0. ([\#17087](https://github.com/element-hq/synapse/issues/17087))
* Bump peaceiris/actions-mdbook from 1.2.0 to 2.0.0. ([\#17089](https://github.com/element-hq/synapse/issues/17089))
* Bump pyasn1-modules from 0.3.0 to 0.4.0. ([\#17093](https://github.com/element-hq/synapse/issues/17093))
* Bump pygithub from 2.2.0 to 2.3.0. ([\#17092](https://github.com/element-hq/synapse/issues/17092))
* Bump ruff from 0.3.5 to 0.3.7. ([\#17094](https://github.com/element-hq/synapse/issues/17094))
* Bump sigstore/cosign-installer from 3.4.0 to 3.5.0. ([\#17088](https://github.com/element-hq/synapse/issues/17088))
* Bump twine from 4.0.2 to 5.0.0. ([\#17091](https://github.com/element-hq/synapse/issues/17091))
* Bump types-pillow from 10.2.0.20240406 to 10.2.0.20240415. ([\#17090](https://github.com/element-hq/synapse/issues/17090))
# Synapse 1.105.1 (2024-04-23)

## Security advisory

The following issues are fixed in 1.105.1.

- [GHSA-3h7q-rfh9-xm4v](https://github.com/element-hq/synapse/security/advisories/GHSA-3h7q-rfh9-xm4v) / [CVE-2024-31208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-31208) — High Severity

  Weakness in auth chain indexing allows DoS from remote room members through disk fill and high CPU usage.

See the advisories for more details. If you have any questions, email security@element.io.


# Synapse 1.105.0 (2024-04-16)

No significant changes since 1.105.0rc1.


# Synapse 1.105.0rc1 (2024-04-11)

### Features

- Stabilize support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010), which clarifies the interaction of push rules and account data. Contributed by @clokep. ([\#17022](https://github.com/element-hq/synapse/issues/17022))
- Stabilize support for [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981): `/relations` recursion (see the sketch after this list). Contributed by @clokep. ([\#17023](https://github.com/element-hq/synapse/issues/17023))
- Add support for moving `/pushrules` off of the main process. ([\#17037](https://github.com/element-hq/synapse/issues/17037), [\#17038](https://github.com/element-hq/synapse/issues/17038))
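A hedged sketch of the now-stabilised recursive `/relations` query from the MSC3981 entry above; the homeserver, room ID, event ID, and token are placeholders.

```python
import urllib.parse

import requests

HOMESERVER = "https://matrix.example.org"  # placeholder
ROOM_ID = "!abcdefghij:example.org"        # placeholder room ID
EVENT_ID = "$threadroot"                   # placeholder thread-root event ID

# recurse=true asks the server to also walk relations of related events.
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v1/rooms/{urllib.parse.quote(ROOM_ID, safe='')}"
    f"/relations/{urllib.parse.quote(EVENT_ID, safe='')}",
    params={"recurse": "true"},
    headers={"Authorization": "Bearer <access_token>"},
)
for event in resp.json().get("chunk", []):
    print(event["event_id"], event.get("type"))
```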
### Bugfixes

- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16930](https://github.com/element-hq/synapse/issues/16930), [\#16932](https://github.com/element-hq/synapse/issues/16932), [\#16942](https://github.com/element-hq/synapse/issues/16942), [\#17064](https://github.com/element-hq/synapse/issues/17064), [\#17065](https://github.com/element-hq/synapse/issues/17065), [\#17066](https://github.com/element-hq/synapse/issues/17066))
- Fix server notice rooms not always being created as unencrypted rooms, even when `encryption_enabled_by_default_for_room_type` is in use (server notices are always unencrypted). ([\#17033](https://github.com/element-hq/synapse/issues/17033))
- Fix the `.m.rule.encrypted_room_one_to_one` and `.m.rule.room_one_to_one` default underride push rules being in the wrong order. Contributed by @Sumpy1. ([\#17043](https://github.com/element-hq/synapse/issues/17043))

### Internal Changes

- Refactor auth chain fetching to reduce duplication. ([\#17044](https://github.com/element-hq/synapse/issues/17044))
- Improve database performance by adding a missing index to `access_tokens.refresh_token_id`. ([\#17045](https://github.com/element-hq/synapse/issues/17045), [\#17054](https://github.com/element-hq/synapse/issues/17054))
- Improve database performance by reducing the number of receipts fetched when sending push notifications. ([\#17049](https://github.com/element-hq/synapse/issues/17049))

### Updates to locked dependencies

* Bump packaging from 23.2 to 24.0. ([\#17027](https://github.com/element-hq/synapse/issues/17027))
* Bump regex from 1.10.3 to 1.10.4. ([\#17028](https://github.com/element-hq/synapse/issues/17028))
* Bump ruff from 0.3.2 to 0.3.5. ([\#17060](https://github.com/element-hq/synapse/issues/17060))
* Bump serde_json from 1.0.114 to 1.0.115. ([\#17041](https://github.com/element-hq/synapse/issues/17041))
* Bump types-pillow from 10.2.0.20240125 to 10.2.0.20240406. ([\#17061](https://github.com/element-hq/synapse/issues/17061))
* Bump types-requests from 2.31.0.20240125 to 2.31.0.20240406. ([\#17063](https://github.com/element-hq/synapse/issues/17063))
* Bump typing-extensions from 4.9.0 to 4.11.0. ([\#17062](https://github.com/element-hq/synapse/issues/17062))
# Synapse 1.104.0 (2024-04-02)

### Bugfixes

- Fix a regression when using an OIDC provider. Introduced in v1.104.0rc1. ([\#17031](https://github.com/element-hq/synapse/issues/17031))


# Synapse 1.104.0rc1 (2024-03-26)

### Features

- Add an OIDC config to specify extra parameters for the authorization grant URL. It can be useful to pass an ACR value, for example. ([\#16971](https://github.com/element-hq/synapse/issues/16971))
- Add support for an OIDC provider returning a JWT. ([\#16972](https://github.com/element-hq/synapse/issues/16972), [\#17031](https://github.com/element-hq/synapse/issues/17031))

### Bugfixes

- Fix a bug which meant that, under certain circumstances, we might never retry sending events or to-device messages over federation after a failure. ([\#16925](https://github.com/element-hq/synapse/issues/16925))
- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16949](https://github.com/element-hq/synapse/issues/16949))
- Fix a case in which the `m.fully_read` marker would not get updated. Contributed by @SpiritCroc. ([\#16990](https://github.com/element-hq/synapse/issues/16990))
- Fix a bug which did not retract a user's pending knocks at rooms when their account was deactivated. Contributed by @hanadi92. ([\#17010](https://github.com/element-hq/synapse/issues/17010))

### Updates to the Docker image

- Updated `start.py` to generate config using the correct user ID when running as root (fixes [\#16824](https://github.com/element-hq/synapse/issues/16824), [\#15202](https://github.com/element-hq/synapse/issues/15202)). ([\#16978](https://github.com/element-hq/synapse/issues/16978))

### Improved Documentation

- Add a query to force a refresh of a remote user's device list to the "Useful SQL for Admins" documentation page. ([\#16892](https://github.com/element-hq/synapse/issues/16892))
- Minor grammatical corrections to the upgrade documentation. ([\#16965](https://github.com/element-hq/synapse/issues/16965))
- Fix the sort order for the documentation version picker, so that newer releases appear above older ones. ([\#16966](https://github.com/element-hq/synapse/issues/16966))
- Remove the recommendation for a specific poetry version from the contributing guide. ([\#17002](https://github.com/element-hq/synapse/issues/17002))

### Internal Changes

- Improve lock performance when a lot of locks are all waiting for a single lock to be released. ([\#16840](https://github.com/element-hq/synapse/issues/16840))
- Update the power level default for public rooms. ([\#16907](https://github.com/element-hq/synapse/issues/16907))
- Improve event validation. ([\#16908](https://github.com/element-hq/synapse/issues/16908))
- Multi-worker-docker-container: disable log buffering. ([\#16919](https://github.com/element-hq/synapse/issues/16919))
- Refactor state delta calculation in the `/sync` handler. ([\#16929](https://github.com/element-hq/synapse/issues/16929))
- Clarify docs for some room state functions. ([\#16950](https://github.com/element-hq/synapse/issues/16950))
- Specify IP subnets in canonical form. ([\#16953](https://github.com/element-hq/synapse/issues/16953))
- As was done for the SAML mapping provider, pass the module API to the OIDC one so the mapper can do more logic in its code. ([\#16974](https://github.com/element-hq/synapse/issues/16974))
- Allow containers building on top of Synapse's Complement container to use the included PostgreSQL cluster. ([\#16985](https://github.com/element-hq/synapse/issues/16985))
- Raise the poetry-core version cap to 1.9.0. ([\#16986](https://github.com/element-hq/synapse/issues/16986))
- Patch the db conn pool sooner in tests. ([\#17017](https://github.com/element-hq/synapse/issues/17017))

### Updates to locked dependencies

* Bump anyhow from 1.0.80 to 1.0.81. ([\#17009](https://github.com/element-hq/synapse/issues/17009))
* Bump black from 23.10.1 to 24.2.0. ([\#16936](https://github.com/element-hq/synapse/issues/16936))
* Bump cryptography from 41.0.7 to 42.0.5. ([\#16958](https://github.com/element-hq/synapse/issues/16958))
* Bump dawidd6/action-download-artifact from 3.1.1 to 3.1.2. ([\#16960](https://github.com/element-hq/synapse/issues/16960))
* Bump dawidd6/action-download-artifact from 3.1.2 to 3.1.4. ([\#17008](https://github.com/element-hq/synapse/issues/17008))
* Bump jinja2 from 3.1.2 to 3.1.3. ([\#17005](https://github.com/element-hq/synapse/issues/17005))
* Bump log from 0.4.20 to 0.4.21. ([\#16977](https://github.com/element-hq/synapse/issues/16977))
* Bump mypy from 1.5.1 to 1.8.0. ([\#16901](https://github.com/element-hq/synapse/issues/16901))
* Bump netaddr from 0.9.0 to 1.2.1. ([\#17006](https://github.com/element-hq/synapse/issues/17006))
* Bump pydantic from 2.6.0 to 2.6.4. ([\#17004](https://github.com/element-hq/synapse/issues/17004))
* Bump pyo3 from 0.20.2 to 0.20.3. ([\#16962](https://github.com/element-hq/synapse/issues/16962))
* Bump ruff from 0.1.14 to 0.3.2. ([\#16994](https://github.com/element-hq/synapse/issues/16994))
* Bump serde from 1.0.196 to 1.0.197. ([\#16963](https://github.com/element-hq/synapse/issues/16963))
* Bump serde_json from 1.0.113 to 1.0.114. ([\#16961](https://github.com/element-hq/synapse/issues/16961))
* Bump types-jsonschema from 4.21.0.20240118 to 4.21.0.20240311. ([\#17007](https://github.com/element-hq/synapse/issues/17007))
* Bump types-psycopg2 from 2.9.21.16 to 2.9.21.20240311. ([\#16995](https://github.com/element-hq/synapse/issues/16995))
* Bump types-pyopenssl from 23.3.0.0 to 24.0.0.20240311. ([\#17003](https://github.com/element-hq/synapse/issues/17003))
# Synapse 1.103.0 (2024-03-19)

No significant changes since 1.103.0rc1.


# Synapse 1.103.0rc1 (2024-03-12)

### Features

- Add a new [List Accounts v3](https://element-hq.github.io/synapse/v1.103/admin_api/user_admin_api.html#list-accounts-v3) Admin API with improved deactivated user filtering capabilities. ([\#16874](https://github.com/element-hq/synapse/issues/16874))
- Include the `Retry-After` header by default per [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041) (see the sketch after this list). Contributed by @clokep. ([\#16947](https://github.com/element-hq/synapse/issues/16947))
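As a hedged client-side sketch of the `Retry-After` behaviour above (the endpoint and token are placeholders), a rate-limited caller can now back off using the standard HTTP header instead of only the Matrix-specific `retry_after_ms` body field.

```python
import time

import requests

resp = requests.post(
    "https://matrix.example.org/_matrix/client/v3/join/%23room%3Aexample.org",  # placeholder
    headers={"Authorization": "Bearer <access_token>"},
    json={},
)
if resp.status_code == 429:
    # Per MSC4041 the server now also sends a standard Retry-After header (seconds).
    delay = int(resp.headers.get("Retry-After", "1"))
    time.sleep(delay)
    # ...retry the request here.
```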
### Bugfixes

- Fix joining remote rooms when a module uses the `on_new_event` callback. This callback may now pass partial state events instead of the full state for remote rooms. Introduced in v1.76.0. ([\#16973](https://github.com/element-hq/synapse/issues/16973))
- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. Contributed by @ggogel. ([\#16968](https://github.com/element-hq/synapse/issues/16968))

### Improved Documentation

- Add HAProxy example for single port operation to reverse proxy documentation. Contributed by Georg Pfuetzenreuter (@tacerus). ([\#16768](https://github.com/element-hq/synapse/issues/16768))
- Improve the documentation around running Complement tests with new configuration parameters. ([\#16946](https://github.com/element-hq/synapse/issues/16946))
- Add docs on upgrading from a very old version. ([\#16951](https://github.com/element-hq/synapse/issues/16951))

### Updates to locked dependencies

* Bump JasonEtco/create-an-issue from 2.9.1 to 2.9.2. ([\#16934](https://github.com/element-hq/synapse/issues/16934))
* Bump anyhow from 1.0.79 to 1.0.80. ([\#16935](https://github.com/element-hq/synapse/issues/16935))
* Bump dawidd6/action-download-artifact from 3.0.0 to 3.1.1. ([\#16933](https://github.com/element-hq/synapse/issues/16933))
* Bump furo from 2023.9.10 to 2024.1.29. ([\#16939](https://github.com/element-hq/synapse/issues/16939))
* Bump pyopenssl from 23.3.0 to 24.0.0. ([\#16937](https://github.com/element-hq/synapse/issues/16937))
* Bump types-netaddr from 0.10.0.20240106 to 1.2.0.20240219. ([\#16938](https://github.com/element-hq/synapse/issues/16938))
# Synapse 1.102.0 (2024-03-05)

### Bugfixes

- Revert https://github.com/element-hq/synapse/pull/16756, which caused incorrect notification counts on mobile clients since v1.100.0. ([\#16979](https://github.com/element-hq/synapse/issues/16979))


# Synapse 1.102.0rc1 (2024-02-20)

### Features

- A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin. ([\#16881](https://github.com/element-hq/synapse/issues/16881)) See the query sketch after this entry.
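A hedged sketch of consuming the new counter via the Prometheus HTTP API. The Prometheus URL is a placeholder, and the breakdown label name (`type` here) is an assumption based on the entry above.

```python
import requests

PROMETHEUS = "http://prometheus.example.org:9090"  # placeholder Prometheus server

# Per-type hourly send rate for Synapse's email counter; the `type` label
# name is an assumption, matching "broken down by type" above.
resp = requests.get(
    f"{PROMETHEUS}/api/v1/query",
    params={"query": "sum by (type) (rate(synapse_emails_sent_total[1h]))"},
)
for series in resp.json()["data"]["result"]:
    print(series["metric"].get("type"), series["value"][1])
```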
### Bugfixes

- Do not send multiple concurrent requests for keys for the same server. ([\#16894](https://github.com/element-hq/synapse/issues/16894))
- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. ([\#16903](https://github.com/element-hq/synapse/issues/16903))
- Always prefer the unthreaded receipt when more than one exists ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)). ([\#16927](https://github.com/element-hq/synapse/issues/16927))

### Improved Documentation

- Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187. ([\#16857](https://github.com/element-hq/synapse/issues/16857))

### Internal Changes

- Don't invalidate the entire event cache when we purge history. ([\#16905](https://github.com/element-hq/synapse/issues/16905))
- Add an experimental config option to not send device list updates for specific users. ([\#16909](https://github.com/element-hq/synapse/issues/16909))
- Fix an incorrect Docker Hub link in the release script. ([\#16910](https://github.com/element-hq/synapse/issues/16910))

### Updates to locked dependencies

* Bump attrs from 23.1.0 to 23.2.0. ([\#16899](https://github.com/element-hq/synapse/issues/16899))
* Bump bcrypt from 4.0.1 to 4.1.2. ([\#16900](https://github.com/element-hq/synapse/issues/16900))
* Bump pygithub from 2.1.1 to 2.2.0. ([\#16902](https://github.com/element-hq/synapse/issues/16902))
* Bump sentry-sdk from 1.40.0 to 1.40.3. ([\#16898](https://github.com/element-hq/synapse/issues/16898))
# Synapse 1.101.0 (2024-02-13)

### Bugfixes

- Fix performance regression when fetching auth chains from the DB. Introduced in v1.100.0. ([\#16893](https://github.com/element-hq/synapse/issues/16893))


# Synapse 1.101.0rc1 (2024-02-06)

### Improved Documentation

- Fix broken links in the documentation. ([\#16853](https://github.com/element-hq/synapse/issues/16853))
- Update MacOS installation instructions to mention that libicu is optional. ([\#16854](https://github.com/element-hq/synapse/issues/16854))
- The version picker now correctly lists versions after `v1.98.0`. ([\#16880](https://github.com/element-hq/synapse/issues/16880))

### Internal Changes

- Add support for stabilised [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981) that adds a `recurse` parameter on the `/relations` API. ([\#16842](https://github.com/element-hq/synapse/issues/16842))

### Updates to locked dependencies

* Bump dorny/paths-filter from 2 to 3. ([\#16869](https://github.com/element-hq/synapse/issues/16869))
* Bump gitpython from 3.1.40 to 3.1.41. ([\#16850](https://github.com/element-hq/synapse/issues/16850))
* Bump hiredis from 2.2.3 to 2.3.2. ([\#16862](https://github.com/element-hq/synapse/issues/16862))
* Bump jsonschema from 4.20.0 to 4.21.1. ([\#16887](https://github.com/element-hq/synapse/issues/16887))
* Bump lxml-stubs from 0.4.0 to 0.5.1. ([\#16885](https://github.com/element-hq/synapse/issues/16885))
* Bump mypy-zope from 1.0.1 to 1.0.3. ([\#16865](https://github.com/element-hq/synapse/issues/16865))
* Bump phonenumbers from 8.13.26 to 8.13.29. ([\#16868](https://github.com/element-hq/synapse/issues/16868))
* Bump pydantic from 2.5.3 to 2.6.0. ([\#16888](https://github.com/element-hq/synapse/issues/16888))
* Bump sentry-sdk from 1.39.1 to 1.40.0. ([\#16889](https://github.com/element-hq/synapse/issues/16889))
* Bump serde from 1.0.195 to 1.0.196. ([\#16867](https://github.com/element-hq/synapse/issues/16867))
* Bump serde_json from 1.0.111 to 1.0.113. ([\#16866](https://github.com/element-hq/synapse/issues/16866))
* Bump sigstore/cosign-installer from 3.3.0 to 3.4.0. ([\#16890](https://github.com/element-hq/synapse/issues/16890))
* Bump types-pillow from 10.1.0.2 to 10.2.0.20240125. ([\#16864](https://github.com/element-hq/synapse/issues/16864))
* Bump types-requests from 2.31.0.10 to 2.31.0.20240125. ([\#16886](https://github.com/element-hq/synapse/issues/16886))
* Bump types-setuptools from 69.0.0.0 to 69.0.0.20240125. ([\#16863](https://github.com/element-hq/synapse/issues/16863))


# Synapse 1.100.0 (2024-01-30)

No significant changes since 1.100.0rc3.


# Synapse 1.100.0rc3 (2024-01-24)

### Bugfixes
Cargo.lock (308 changes, generated)

@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.79"
+version = "1.0.82"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
+checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
 
 [[package]]
 name = "arc-swap"
@@ -29,6 +29,12 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
+[[package]]
+name = "base64"
+version = "0.21.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+
 [[package]]
 name = "bitflags"
 version = "1.3.2"
@@ -53,12 +59,33 @@ dependencies = [
  "generic-array",
 ]
 
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
+[[package]]
+name = "bytes"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+
 [[package]]
 name = "cfg-if"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cpufeatures"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "crypto-common"
 version = "0.1.6"
@@ -71,15 +98,21 @@ dependencies = [
 
 [[package]]
 name = "digest"
-version = "0.10.5"
+version = "0.10.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
 dependencies = [
  "block-buffer",
  "crypto-common",
  "subtle",
 ]
 
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
 [[package]]
 name = "generic-array"
 version = "0.14.6"
@@ -90,6 +123,43 @@ dependencies = [
  "version_check",
 ]
 
+[[package]]
+name = "getrandom"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "headers"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9"
+dependencies = [
+ "base64",
+ "bytes",
+ "headers-core",
+ "http",
+ "httpdate",
+ "mime",
+ "sha1",
+]
+
+[[package]]
+name = "headers-core"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4"
+dependencies = [
+ "http",
+]
+
 [[package]]
 name = "heck"
 version = "0.4.1"
@@ -102,6 +172,23 @@ version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
+[[package]]
+name = "http"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
 [[package]]
 name = "indoc"
 version = "2.0.4"
@@ -114,6 +201,15 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
 
+[[package]]
+name = "js-sys"
+version = "0.3.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
+dependencies = [
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
@@ -122,9 +218,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
-version = "0.2.135"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
 
 [[package]]
 name = "lock_api"
@@ -138,9 +234,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.20"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
 
 [[package]]
 name = "memchr"
@@ -157,6 +253,12 @@ dependencies = [
  "autocfg",
 ]
 
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
 [[package]]
 name = "once_cell"
 version = "1.15.0"
@@ -186,6 +288,18 @@ dependencies = [
  "windows-sys",
 ]
 
+[[package]]
+name = "portable-atomic"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
 [[package]]
 name = "proc-macro2"
 version = "1.0.76"
@@ -197,9 +311,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3"
-version = "0.20.2"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0"
+checksum = "53bdbb96d49157e65d45cc287af5f32ffadd5f4761438b527b055fb0d4bb8233"
 dependencies = [
  "anyhow",
  "cfg-if",
@@ -207,6 +321,7 @@ dependencies = [
  "libc",
  "memoffset",
  "parking_lot",
+ "portable-atomic",
  "pyo3-build-config",
  "pyo3-ffi",
  "pyo3-macros",
@@ -215,9 +330,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-build-config"
-version = "0.20.2"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be"
+checksum = "deaa5745de3f5231ce10517a1f5dd97d53e5a2fd77aa6b5842292085831d48d7"
 dependencies = [
  "once_cell",
  "target-lexicon",
@@ -225,9 +340,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-ffi"
-version = "0.20.2"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1"
+checksum = "62b42531d03e08d4ef1f6e85a2ed422eb678b8cd62b762e53891c05faf0d4afa"
 dependencies = [
  "libc",
  "pyo3-build-config",
@@ -246,9 +361,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-macros"
-version = "0.20.2"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3"
+checksum = "7305c720fa01b8055ec95e484a6eca7a83c841267f0dd5280f0c8b8551d2c158"
 dependencies = [
  "proc-macro2",
  "pyo3-macros-backend",
@@ -258,12 +373,13 @@ dependencies = [
 
 [[package]]
 name = "pyo3-macros-backend"
-version = "0.20.2"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f"
+checksum = "7c7e9b68bb9c3149c5b0cade5d07f953d6d125eb4337723c4ccdb665f1f96185"
 dependencies = [
  "heck",
  "proc-macro2",
+ "pyo3-build-config",
  "quote",
  "syn",
 ]
@@ -287,6 +403,36 @@ dependencies = [
  "proc-macro2",
 ]
 
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
 [[package]]
 name = "redox_syscall"
 version = "0.2.16"
@@ -298,9 +444,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.10.3"
+version = "1.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
+checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -339,18 +485,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.195"
+version = "1.0.200"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
+checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.195"
+version = "1.0.200"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
+checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -359,15 +505,37 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.111"
+version = "1.0.116"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
+checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
 dependencies = [
  "itoa",
  "ryu",
  "serde",
 ]
 
+[[package]]
+name = "sha1"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
+[[package]]
+name = "sha2"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
 [[package]]
 name = "smallvec"
 version = "1.10.0"
@@ -396,16 +564,23 @@ name = "synapse"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "base64",
  "blake2",
+ "bytes",
+ "headers",
  "hex",
+ "http",
  "lazy_static",
  "log",
+ "mime",
  "pyo3",
  "pyo3-log",
  "pythonize",
  "regex",
  "serde",
  "serde_json",
+ "sha2",
+ "ulid",
 ]
 
 [[package]]
@@ -420,6 +595,17 @@ version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
 
+[[package]]
+name = "ulid"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+dependencies = [
+ "getrandom",
+ "rand",
+ "web-time",
+]
+
 [[package]]
 name = "unicode-ident"
 version = "1.0.5"
@@ -438,6 +624,76 @@ version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
+
+[[package]]
+name = "web-time"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "windows-sys"
 version = "0.36.1"
debian/changelog (90 changes, vendored)

@@ -1,3 +1,93 @@
matrix-synapse-py3 (1.107.0~rc1) stable; urgency=medium

  * New Synapse release 1.107.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 May 2024 16:26:26 +0100

matrix-synapse-py3 (1.106.0) stable; urgency=medium

  * New Synapse release 1.106.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Apr 2024 11:51:43 +0100

matrix-synapse-py3 (1.106.0~rc1) stable; urgency=medium

  * New Synapse release 1.106.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Apr 2024 15:54:59 +0100

matrix-synapse-py3 (1.105.1) stable; urgency=medium

  * New Synapse release 1.105.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Apr 2024 15:56:18 +0100

matrix-synapse-py3 (1.105.0) stable; urgency=medium

  * New Synapse release 1.105.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Apr 2024 15:53:23 +0100

matrix-synapse-py3 (1.105.0~rc1) stable; urgency=medium

  * New Synapse release 1.105.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 11 Apr 2024 12:15:49 +0100

matrix-synapse-py3 (1.104.0) stable; urgency=medium

  * New Synapse release 1.104.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Apr 2024 17:15:45 +0100

matrix-synapse-py3 (1.104.0~rc1) stable; urgency=medium

  * New Synapse release 1.104.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Mar 2024 11:48:58 +0000

matrix-synapse-py3 (1.103.0) stable; urgency=medium

  * New Synapse release 1.103.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Mar 2024 12:24:36 +0000

matrix-synapse-py3 (1.103.0~rc1) stable; urgency=medium

  * New Synapse release 1.103.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Mar 2024 15:02:56 +0000

matrix-synapse-py3 (1.102.0) stable; urgency=medium

  * New Synapse release 1.102.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Mar 2024 14:47:03 +0000

matrix-synapse-py3 (1.102.0~rc1) stable; urgency=medium

  * New Synapse release 1.102.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Feb 2024 15:50:36 +0000

matrix-synapse-py3 (1.101.0) stable; urgency=medium

  * New Synapse release 1.101.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Feb 2024 10:45:35 +0000

matrix-synapse-py3 (1.101.0~rc1) stable; urgency=medium

  * New Synapse release 1.101.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Feb 2024 16:02:02 +0000

matrix-synapse-py3 (1.100.0) stable; urgency=medium

  * New Synapse release 1.100.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jan 2024 16:58:19 +0000

matrix-synapse-py3 (1.100.0~rc3) stable; urgency=medium

  * New Synapse release 1.100.0rc3.
@@ -1,20 +0,0 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
@@ -1,50 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "Synapse development"
copyright = "2023, The Matrix.org Foundation C.I.C."
author = "The Synapse Maintainers and Community"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "autodoc2",
    "myst_parser",
]

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]


# -- Options for Autodoc2 ----------------------------------------------------

autodoc2_docstring_parser_regexes = [
    # this will render all docstrings as 'MyST' Markdown
    (r".*", "myst"),
]

autodoc2_packages = [
    {
        "path": "../synapse",
        # Don't render documentation for everything as a matter of course
        "auto_mode": False,
    },
]


# -- Options for MyST (Markdown) ---------------------------------------------

# myst_heading_anchors = 2


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "furo"
html_static_path = ["_static"]
@@ -1,22 +0,0 @@
.. Synapse Developer Documentation documentation master file, created by
   sphinx-quickstart on Mon Mar 13 08:59:51 2023.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to the Synapse Developer Documentation!
===========================================================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   modules/federation_sender



Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
@@ -1,5 +0,0 @@
Federation Sender
=================

```{autodoc2-docstring} synapse.federation.sender
```
@@ -163,7 +163,7 @@ FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md'
LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git'
LABEL org.opencontainers.image.licenses='Apache-2.0'
LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'

RUN \
  --mount=type=cache,target=/var/cache/apt,sharing=locked \
@@ -30,3 +30,14 @@ Consult `scripts-dev/complement.sh` in the repository root for a real example.

[complement]: https://github.com/matrix-org/complement
[complementEnv]: https://github.com/matrix-org/complement/pull/382

## How to modify homeserver.yaml for Complement tests

It's common for MSCs to be gated behind a feature flag like this:
```yaml
experimental_features:
  faster_joins: true
```
To modify this for the Complement image, modify `./conf/workers-shared-extra.yaml.j2`. Despite the name,
this will affect non-worker mode as well. Remember to _rebuild_ the image (so don't use `-e` if using
`complement.sh`).
@@ -1,7 +1,7 @@
[program:postgres]
command=/usr/local/bin/prefix-log gosu postgres postgres

# Only start if START_POSTGRES=1
# Only start if START_POSTGRES=true
autostart=%(ENV_START_POSTGRES)s

# Lower priority number = starts first
@@ -32,8 +32,9 @@ case "$SYNAPSE_COMPLEMENT_DATABASE" in
    ;;

  sqlite|"")
    # Configure supervisord not to start Postgres, as we don't need it
    export START_POSTGRES=false
    # Set START_POSTGRES to false unless it has already been set
    # (i.e. by another container image inheriting our own).
    export START_POSTGRES=${START_POSTGRES:-false}
    ;;

  *)
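As an aside on the `${START_POSTGRES:-false}` idiom this hunk introduces: the default is applied only when the variable is unset or empty, so a value exported by an inheriting image wins. A minimal Python sketch of the same default-if-unset semantics (illustrative only, not part of the image):

```python
import os

# Mirror ${START_POSTGRES:-false}: fall back when the variable is
# unset *or* empty ("or" treats an empty string as falsy).
start_postgres = os.environ.get("START_POSTGRES") or "false"
print(start_postgres)
```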
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##

experimental_features:
  # client-side support for partial state in /send_join responses
  faster_joins: true
  # Enable support for polls
  msc3381_polls_enabled: true
  # Enable deleting device-specific notification settings stored in account data
@@ -102,6 +100,12 @@ experimental_features:
  msc3391_enabled: true
  # Filtering /messages by relation type.
  msc3874_enabled: true
  # no UIA for x-signing upload for the first time
  msc3967_enabled: true
  # Expose a room summary for public rooms
  msc3266_enabled: true

  msc4115_membership_on_events: true

server_notices:
  system_mxid_localpart: _server
@@ -310,6 +310,13 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "push_rules": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
}

# Templates for sections that may be inserted multiple times in config files
@@ -401,6 +408,7 @@ def add_worker_roles_to_shared_config(
        "receipts",
        "to_device",
        "typing",
        "push_rules",
    ]

# Worker-type specific sharding config. Now a single worker can fulfill multiple
@@ -7,6 +9,
# prefix-log command [args...]
#

exec 1> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&1)
exec 2> >(awk '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0}' >&2)
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
# confusion due to interleaving of the different processes.
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
exec "$@"
@@ -160,11 +160,6 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
    data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")

    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        log(f"Setting ownership on {data_dir} to {ownership}")
        subprocess.run(["chown", ownership, data_dir], check=True)

    # create a suitable log config from our template
    log_config_file = "%s/%s.log.config" % (config_dir, server_name)
    if not os.path.exists(log_config_file):
@@ -189,9 +184,15 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
        "--generate-config",
        "--open-private-ports",
    ]

    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        log(f"Setting ownership on {data_dir} to {ownership}")
        subprocess.run(["chown", ownership, data_dir], check=True)
        args = ["gosu", ownership] + args

    # log("running %s" % (args, ))
    flush_buffers()
    os.execv(sys.executable, args)
    subprocess.run(args, check=True)


def main(args: List[str], environ: MutableMapping[str, str]) -> None:
@@ -1,6 +1,6 @@
# Edit Room Membership API

This API allows an administrator to join an user account with a given `user_id`
This API allows an administrator to join a user account with a given `user_id`
to a room with a given `room_id_or_alias`. You can only modify the membership of
local users. The server administrator must be in the room and have permission to
invite users.
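A minimal Python sketch of calling this endpoint from a script (the path follows the admin API docs; the host, token, room and user IDs are placeholders):

```python
import requests
from urllib.parse import quote

SERVER = "https://synapse.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"    # placeholder

room = quote("!roomid:example.com", safe="")
resp = requests.post(
    f"{SERVER}/_synapse/admin/v1/join/{room}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"user_id": "@alice:example.com"},  # must be a local user
)
resp.raise_for_status()
print(resp.json())
```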
@@ -913,7 +913,7 @@ With all that being said, if you still want to try and recover the room:
   them handle rejoining themselves.

4. If `new_room_user_id` was given, a 'Content Violation' will have been
   created. Consider whether you want to delete that roomm.
   created. Consider whether you want to delete that room.

# Make Room Admin API
@@ -164,6 +164,7 @@ Body parameters:
  Other allowed options are: `bot` and `support`.

## List Accounts
### List Accounts (V2)

This API returns all local user accounts.
By default, the response is ordered by ascending user ID.
@@ -287,6 +288,19 @@ The following fields are returned in the JSON response body:

*Added in Synapse 1.93:* the `locked` query parameter and response field.

### List Accounts (V3)

This API returns all local user accounts (see v2). In contrast to v2, the query parameter `deactivated` is handled differently.

```
GET /_synapse/admin/v3/users
```

**Parameters**
- `deactivated` - Optional flag to filter deactivated users. If `true`, only deactivated users are returned.
  If `false`, deactivated users are excluded from the query. When the flag is absent (the default),
  users are not filtered by deactivation status.
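For example, a short Python sketch querying only deactivated accounts via v3 (host and token are placeholders, and the response is assumed to mirror v2's `users` list):

```python
import requests

SERVER = "https://synapse.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"    # placeholder

resp = requests.get(
    f"{SERVER}/_synapse/admin/v3/users",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    params={"deactivated": "true"},  # only deactivated users
)
resp.raise_for_status()
for user in resp.json().get("users", []):
    print(user["name"])
```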
## Query current sessions for a user

This API returns information about the active sessions for a specific user.
@@ -68,7 +68,7 @@ Of their installation methods, we recommend

```shell
pip install --user pipx
pipx install poetry==1.5.1 # Problems with Poetry 1.6, see https://github.com/matrix-org/synapse/issues/16147
pipx install poetry
```

but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
@@ -86,6 +86,8 @@ poetry install --extras all
This will install the runtime and developer dependencies for the project. Be sure to check
that the `poetry install` step completed cleanly.

For OSX users, be sure to set `PKG_CONFIG_PATH` to support `icu4c`. Run `brew info icu4c` for more details.

## Running Synapse via poetry

To start a local instance of Synapse in the locked poetry environment, create a config file:
@@ -329,7 +331,7 @@ This configuration should generally cover your needs.
- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).

For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/vector-im/sytest/blob/develop/docker/README.md).
For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).


## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
@@ -7,8 +7,10 @@ follow the semantics described in
and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental.**
specification yet, the use of `m.room.retention` events for per-room
retention policies is to be considered as experimental. However, the use
of a default message retention policy is considered a stable feature
in Synapse.**

A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after
@@ -49,8 +51,8 @@ clients.

## Server configuration

Support for this feature can be enabled and configured by adding a the
`retention` in the Synapse configuration file (see
Support for this feature can be enabled and configured by adding the
`retention` option in the Synapse configuration file (see
[configuration manual](usage/configuration/config_documentation.md#retention)).
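A hedged sketch of the expiry rule that `max_lifetime` implies, as purely illustrative arithmetic rather than Synapse's internal code:

```python
def is_expired(origin_server_ts_ms: int, now_ms: int, max_lifetime_ms: int) -> bool:
    # An event becomes eligible for purging once it has outlived max_lifetime.
    return now_ms - origin_server_ts_ms > max_lifetime_ms

# e.g. a 30-day policy:
THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000
print(is_expired(0, THIRTY_DAYS_MS + 1, THIRTY_DAYS_MS))  # True
```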
To enable support for message retention policies, set the setting
@@ -115,7 +117,7 @@ In this example, we define three jobs:
   policy's `max_lifetime` is greater than a week.

Note that this example is tailored to show different configurations and
features slightly more jobs than it's probably necessary (in practice, a
features slightly more jobs than is probably necessary (in practice, a
server admin would probably consider it better to replace the two last
jobs with one that runs once a day and handles rooms which
policy's `max_lifetime` is greater than 3 days).
@@ -142,6 +142,10 @@ Called after sending an event into a room. The module is passed the event, as we
as the state of the room _after_ the event. This means that if the event is a state event,
it will be included in this state.

The state map may not be complete if Synapse hasn't yet loaded the full state
of the room. This can happen for events in rooms that were just joined from
a remote server.

Note that this callback is called when the event has already been processed and stored
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
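A minimal sketch of a module registering this callback; the registration helper and signature follow the third-party rules callback docs, but treat the details as illustrative:

```python
from typing import Any, Dict


class ExampleModule:
    def __init__(self, config: Dict[str, Any], api):
        # `api` is a synapse.module_api.ModuleApi instance.
        api.register_third_party_rules_callbacks(on_new_event=self.on_new_event)

    async def on_new_event(self, event, state_events) -> None:
        # Called after the event is persisted; the state map may be partial
        # for rooms that were just joined from a remote server.
        print("new event", event.event_id, "state entries:", len(state_events))
```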
@@ -12,7 +12,7 @@ This is the main reason people have a poor matrix experience on resource constra

While synapse does have some performance issues with presence [#3971](https://github.com/matrix-org/synapse/issues/3971), the fundamental problem is that this is an easy feature to implement for a centralised service at nearly no overhead, but federation makes it combinatorial [#8055](https://github.com/matrix-org/synapse/issues/8055). There is also a client-side config option which disables the UI and idle tracking [enable_presence_by_hs_url] to blacklist the largest instances but I didn't notice much difference, so I recommend disabling the feature entirely at the server level as well.

[enable_presence_by_hs_url]: https://github.com/vector-im/element-web/blob/v1.7.8/config.sample.json#L45
[enable_presence_by_hs_url]: https://github.com/element-hq/element-web/blob/v1.7.8/config.sample.json#L45

### Joining
@@ -128,7 +128,7 @@ can read more about that [here](https://www.postgresql.org/docs/10/kernel-resour
### Overview

The script `synapse_port_db` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done in as a two phase
backed by SQLite to using PostgreSQL. This is done as a two phase
process:

1. Copy the existing SQLite database to a separate location and run
@@ -182,7 +182,7 @@ synapse_port_db --sqlite-database homeserver.db.snapshot \
    --postgres-config homeserver-postgres.yaml
```

The flag `--curses` displays a coloured curses progress UI.
The flag `--curses` displays a coloured curses progress UI. (NOTE: if your terminal is too small the script will error out)

If the script took a long time to complete, or time has otherwise passed
since the original snapshot was taken, repeat the previous steps with a
@@ -186,6 +186,25 @@ Example configuration, if using a UNIX socket. The configuration lines regarding
  backend matrix
    server matrix unix@/run/synapse/main_public.sock
```
Example configuration when using a single port for both client and federation traffic.
```
frontend https
  bind *:443,[::]:443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
  http-request set-header X-Forwarded-Proto https if { ssl_fc }
  http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
  http-request set-header X-Forwarded-For %[src]

  acl matrix-host hdr(host) -i matrix.example.com matrix.example.com:443
  acl matrix-sni ssl_fc_sni matrix.example.com
  acl matrix-path path_beg /_matrix
  acl matrix-path path_beg /_synapse/client

  use_backend matrix if matrix-host matrix-path
  use_backend matrix if matrix-sni

backend matrix
  server matrix 127.0.0.1:8008
```

[Delegation](delegate.md) example:
```
@@ -26,7 +26,7 @@ for most users.
#### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/vectorim/synapse> or at [`ghcr.io/element-hq/synapse`](https://ghcr.io/element-hq/synapse)
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/element-hq/synapse`](https://ghcr.io/element-hq/synapse)
which can be used with the docker-compose file available at
[contrib/docker](https://github.com/element-hq/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
@@ -259,9 +259,9 @@ users, etc.) to the developers via the `--report-stats` argument.

This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
identify itself to other homeserver, so don't lose or delete them. It would be
identify itself to other homeservers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeserver have the
change your homeserver's keys, you may find that other homeservers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
@@ -326,6 +326,17 @@ Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) fo
You may need to install icu, and make the icu binaries and libraries accessible.
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.

If you're struggling to get icu discovered, and see:
```
RuntimeError:
Please install pkg-config on your system or set the ICU_VERSION environment
variable to the version of ICU you have installed.
```
despite it being installed and having your `PATH` updated, you can omit this dependency by
not specifying `--extras all` to `poetry`. If using postgres, you can install Synapse via
`poetry install --extras saml2 --extras oidc --extras postgres --extras opentracing --extras redis --extras sentry`.
ICU is not a hard dependency on getting a working installation.

On ARM-based Macs you may also need to install libjpeg and libpq:
```sh
brew install jpeg libpq
@@ -136,8 +136,8 @@ This will install and start a systemd service called `coturn`.
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
not work with any Matrix client that uses Chromium's WebRTC library. This
currently includes Element Android & iOS; for more details, see their
[respective](https://github.com/vector-im/element-android/issues/1533)
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
[respective](https://github.com/element-hq/element-android/issues/1533)
[issues](https://github.com/element-hq/element-ios/issues/2712) as well as the underlying
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
Consider using a ZeroSSL certificate for your TURN server as a working alternative.
@@ -137,8 +137,8 @@ must be edited:
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
not work with any Matrix client that uses Chromium's WebRTC library. This
currently includes Element Android & iOS; for more details, see their
[respective](https://github.com/vector-im/element-android/issues/1533)
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
[respective](https://github.com/element-hq/element-android/issues/1533)
[issues](https://github.com/element-hq/element-ios/issues/2712) as well as the underlying
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
Consider using a ZeroSSL certificate for your TURN server as a working alternative.
@@ -50,11 +50,13 @@ comment these options out and use those specified by the module instead.

A custom mapping provider must specify the following methods:

* `def __init__(self, parsed_config)`
* `def __init__(self, parsed_config, module_api)`
  - Arguments:
    - `parsed_config` - A configuration object that is the return value of the
      `parse_config` method. You should set any configuration options needed by
      the module here.
    - `module_api` - a `synapse.module_api.ModuleApi` object which provides the
      stable API available for extension modules.
* `def parse_config(config)`
  - This method should have the `@staticmethod` decoration.
  - Arguments:
@@ -96,6 +98,7 @@ A custom mapping provider must specify the following methods:
    either accept this localpart or pick their own username. Otherwise this
    option has no effect. If omitted, defaults to `False`.
  - `display_name`: An optional string, the display name for the user.
  - `picture`: An optional string, the avatar url for the user.
  - `emails`: A list of strings, the email address(es) to associate with
    this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
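Putting the methods listed in these hunks together, a skeleton provider under the new two-argument `__init__` might look like the following sketch (attribute handling is illustrative only):

```python
class ExampleMappingProvider:
    def __init__(self, parsed_config, module_api):
        # `module_api` is the stable module API added by this change.
        self._config = parsed_config
        self._api = module_api

    @staticmethod
    def parse_config(config):
        # Validate and return the provider's configuration object.
        return config
```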
@@ -9,6 +9,7 @@ ReloadPropagatedFrom=matrix-synapse.target
Type=notify
NotifyAccess=main
User=matrix-synapse
RuntimeDirectory=synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
@@ -88,15 +88,43 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

Generally Synapse database schemas are compatible across multiple versions, once
a version of Synapse is deployed you may not be able to rollback automatically.
Generally Synapse database schemas are compatible across multiple versions, but once
a version of Synapse is deployed you may not be able to roll back automatically.
The following table gives the version ranges and the earliest version they can
be rolled back to. E.g. Synapse versions v1.58.0 through v1.61.1 can be rolled
back safely to v1.57.0, but starting with v1.62.0 it is only safe to rollback to
back safely to v1.57.0, but starting with v1.62.0 it is only safe to roll back to
v1.61.0.

<!-- REPLACE_WITH_SCHEMA_VERSIONS -->

## Upgrading from a very old version

You need to read all of the upgrade notes for each version between your current
version and the latest so that you can update your dependencies, environment,
config files, etc. if necessary. But you do not need to perform an
upgrade to each individual version that was missed.

We do not have a list of which versions must be installed. Instead, we recommend
that you upgrade through each incompatible database schema version, which would
give you the ability to roll back the maximum number of versions should anything
go wrong. See [Rolling back to older versions](#rolling-back-to-older-versions)
above.

Additionally, new versions of Synapse will occasionally run database migrations
and background updates to update the database. Synapse will not start until
database migrations are complete. You should wait until background updates from
each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).

# Upgrading to v1.106.0

## Minimum supported Rust version
The minimum supported Rust version has been increased from v1.65.0 to v1.66.0.
Users building from source will need to ensure their `rustc` version is up to
date.


# Upgrading to v1.100.0

## Minimum supported Rust version
@@ -44,7 +44,7 @@ For each update:

## Enabled

This API allow pausing background updates.
This API allows pausing background updates.

Background updates should *not* be paused for significant periods of time, as
this can affect the performance of Synapse.
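A short Python sketch of pausing and resuming via this API (host and token are placeholders; the endpoint and `enabled` body field follow the background updates admin API docs):

```python
import requests

SERVER = "https://synapse.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"    # placeholder
URL = f"{SERVER}/_synapse/admin/v1/background_updates/enabled"
HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

# Pause background updates...
requests.post(URL, headers=HEADERS, json={"enabled": False}).raise_for_status()
# ...and remember to resume them promptly, since long pauses hurt performance.
requests.post(URL, headers=HEADERS, json={"enabled": True}).raise_for_status()
```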
@@ -120,6 +120,11 @@ for file in $source_directory/*; do
done
```

How do I upgrade from a very old version of Synapse to the latest?
---
See [this](../../upgrade.html#upgrading-from-a-very-old-version) section in the
upgrade docs.

Manually resetting passwords
---
Users can reset their password through their client. Alternatively, a server admin
@@ -236,7 +241,7 @@ in memory constrained environments, or increased if performance starts to
degrade.

However, degraded performance due to a low cache factor, common on
machines with slow disks, often leads to explosions in memory use due
machines with slow disks, often leads to explosions in memory use due to
backlogged requests. In this case, reducing the cache factor will make
things worse. Instead, try increasing it drastically. 2.0 is a good
starting value.
@@ -205,3 +205,12 @@ SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_s
FROM devices
WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
```

## Clear the cache of a remote user's device list

Forces the resync of a remote user's device list - useful if you have somehow cached a bad state
and the remote server will not send out a device list update.
```sql
INSERT INTO device_lists_remote_resync
VALUES ('USER_ID', (EXTRACT(epoch FROM NOW()) * 1000)::BIGINT);
```
@@ -676,8 +676,8 @@ This setting has the following sub-options:
  trailing 's'.
* `app_name`: `app_name` defines the default value for '%(app)s' in `notif_from` and email
  subjects. It defaults to 'Matrix'.
* `enable_notifs`: Set to true to enable sending emails for messages that the user
  has missed. Disabled by default.
* `enable_notifs`: Set to true to allow users to receive e-mail notifications. If this is not set,
  users can configure e-mail notifications but will not receive them. Disabled by default.
* `notif_for_new_users`: Set to false to disable automatic subscription to email
  notifications for new users. Enabled by default.
* `notif_delay_before_mail`: The time to wait before emailing about a notification.
@@ -1317,6 +1317,12 @@ Options related to caching.
The number of events to cache in memory. Defaults to 10K. Like other caches,
this is affected by `caches.global_factor` (see below).

For example, the default is 10K and the global_factor default is 0.5.

Since 10K * 0.5 is 5K, the effective event cache size will be 5K.

The cache affected by this configuration is named as "*getEvent*".

Note that this option is not part of the `caches` section.

Example configuration:
@@ -1342,6 +1348,8 @@ number of entries that can be stored.

Defaults to 0.5, which will halve the size of all caches.

Note that changing this value also affects the HTTP connection pool.

* `per_cache_factors`: A dictionary of cache name to cache factor for that individual
  cache. Overrides the global cache factor for a given cache.
@@ -3349,6 +3357,9 @@ Options for each entry include:
  not included in `scopes`. Set to `userinfo_endpoint` to always use the
  userinfo endpoint.

* `additional_authorization_parameters`: String to string dictionary that will be passed as
  additional parameters to the authorization grant URL.

* `allow_existing_users`: set to true to allow a user logging in via OIDC to
  match a pre-existing account instead of failing. This could be used if
  switching from password logins to OIDC. Defaults to false.
@@ -3473,6 +3484,8 @@ oidc_providers:
    token_endpoint: "https://accounts.example.com/oauth2/token"
    userinfo_endpoint: "https://accounts.example.com/userinfo"
    jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
    additional_authorization_parameters:
      acr_values: 2fa
    skip_verification: true
    enable_registration: true
    user_mapping_provider:
@@ -86,9 +86,9 @@ The search term is then split into words:
* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
  are considered words.

The queries for PostgreSQL and SQLite are detailed below, by their overall goal
The queries for PostgreSQL and SQLite are detailed below, but their overall goal
is to find matching users, preferring users who are "real" (e.g. not bots,
not deactivated). It is assumed that real users will have an display name and
not deactivated). It is assumed that real users will have a display name and
avatar set.

### PostgreSQL
@@ -54,7 +54,7 @@ function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
            fetch("https://api.github.com/repos/element-hq/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
@@ -100,10 +100,30 @@ function sortVersions(a, b) {
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];
    // If any of the versions do not conform to a semantic version string, they
    // will be sorted behind a valid version.
    const versionA = (a.match(/v(\d+(\.\d+)+)/) || [])[1]?.split('.') ?? '';
    const versionB = (b.match(/v(\d+(\.\d+)+)/) || [])[1]?.split('.') ?? '';

    return versionB.localeCompare(versionA);
    for (let i = 0; i < Math.max(versionA.length, versionB.length); i++) {
        if (versionB[i] === undefined) {
            return -1;
        }
        if (versionA[i] === undefined) {
            return 1;
        }

        const partA = parseInt(versionA[i], 10);
        const partB = parseInt(versionB[i], 10);

        if (partA > partB) {
            return -1;
        } else if (partB > partA) {
            return 1;
        }
    }

    return 0;
}

/**
@@ -124,4 +144,4 @@ function changeVersion(url, newVersion) {
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
}
@@ -232,7 +232,7 @@ information.
    ^/_matrix/client/v1/rooms/.*/hierarchy$
    ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
    ^/_matrix/client/v1/rooms/.*/threads$
    ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
    ^/_matrix/client/unstable/im.nheko.summary/summary/.*$
    ^/_matrix/client/(r0|v3|unstable)/account/3pid$
    ^/_matrix/client/(r0|v3|unstable)/account/whoami$
    ^/_matrix/client/(r0|v3|unstable)/devices$
@@ -532,6 +532,13 @@ the stream writer for the `presence` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

##### The `push_rules` stream

The following endpoints should be routed directly to the worker configured as
the stream writer for the `push` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/

#### Restrict outbound federation traffic to a specific set of workers

The
@@ -627,9 +634,9 @@ worker application type.

#### Push Notifications

You can designate generic worker to sending push notifications to
You can designate generic workers to send push notifications to
a [push gateway](https://spec.matrix.org/v1.5/push-gateway-api/) such as
[sygnal](https://github.com/vector-im/sygnal) and email.
[sygnal](https://github.com/matrix-org/sygnal) and email.

This will stop the main process sending push notifications.
poetry.lock (1628 lines changed; generated): file diff suppressed because it is too large.
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.100.0rc3"
version = "1.107.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -321,7 +321,7 @@ all = [
# This helps prevent merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.1.14"
ruff = "0.3.7"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -364,17 +364,6 @@ towncrier = ">=18.6.0rc1"
tomli = ">=1.2.3"


# Dependencies for building the development documentation
[tool.poetry.group.dev-docs]
optional = true

[tool.poetry.group.dev-docs.dependencies]
sphinx = {version = "^6.1", python = "^3.8"}
sphinx-autodoc2 = {version = ">=0.4.2,<0.6.0", python = "^3.8"}
myst-parser = {version = "^1.0.0", python = "^3.8"}
furo = ">=2022.12.7,<2024.0.0"


[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# https://github.com/matrix-org/synapse/issues/13849 and
@@ -382,7 +371,7 @@ furo = ">=2022.12.7,<2024.0.0"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.8.1", "setuptools_rust>=1.3,<=1.8.1"]
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"

edition = "2021"
rust-version = "1.65.0"
rust-version = "1.66.0"

[lib]
name = "synapse"
@@ -23,8 +23,13 @@ name = "synapse.synapse_rust"

[dependencies]
anyhow = "1.0.63"
base64 = "0.21.7"
bytes = "1.6.0"
headers = "0.4.0"
http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
pyo3 = { version = "0.20.0", features = [
    "macros",
    "anyhow",
@@ -34,8 +39,10 @@ pyo3 = { version = "0.20.0", features = [
pyo3-log = "0.9.0"
pythonize = "0.20.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
ulid = "1.1.2"

[features]
extension-module = ["pyo3/extension-module"]
rust/src/errors.rs (60 lines; new file)
@@ -0,0 +1,60 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

#![allow(clippy::new_ret_no_self)]

use std::collections::HashMap;

use http::{HeaderMap, StatusCode};
use pyo3::{exceptions::PyValueError, import_exception};

import_exception!(synapse.api.errors, SynapseError);

impl SynapseError {
    pub fn new(
        code: StatusCode,
        message: String,
        errcode: &'static str,
        additional_fields: Option<HashMap<String, String>>,
        headers: Option<HeaderMap>,
    ) -> pyo3::PyErr {
        // Transform the HeaderMap into a HashMap<String, String>
        let headers = if let Some(headers) = headers {
            let mut map = HashMap::with_capacity(headers.len());
            for (key, value) in headers.iter() {
                let Ok(value) = value.to_str() else {
                    // This should never happen, but we don't want to panic in case it does
                    return PyValueError::new_err(
                        "Could not construct SynapseError: header value is not valid ASCII",
                    );
                };

                map.insert(key.as_str().to_owned(), value.to_owned());
            }
            Some(map)
        } else {
            None
        };

        SynapseError::new_err((code.as_u16(), message, errcode, additional_fields, headers))
    }
}

import_exception!(synapse.api.errors, NotFoundError);

impl NotFoundError {
    pub fn new() -> pyo3::PyErr {
        NotFoundError::new_err(())
    }
}
@@ -20,8 +20,10 @@

//! Implements the internal metadata class attached to events.
//!
//! The internal metadata is a bit like a `TypedDict`, in that it is stored as a
//! JSON dict in the DB. Most events have zero, or only a few, of these keys
//! The internal metadata is a bit like a `TypedDict`, in that most of
//! it is stored as a JSON dict in the DB (the exceptions being `outlier`
//! and `stream_ordering` which have their own columns in the database).
//! Most events have zero, or only a few, of these keys
//! set. Therefore, since we care more about memory size than performance here,
//! we store these fields in a mapping.
//!
@@ -234,6 +236,9 @@ impl EventInternalMetadata {
        self.clone()
    }

    /// Get a dict holding the data stored in the `internal_metadata` column in the database.
    ///
    /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here.
    fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> {
        let dict = PyDict::new(py);
rust/src/http.rs (165 lines; new file)
@@ -0,0 +1,165 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

use bytes::{Buf, BufMut, Bytes, BytesMut};
use headers::{Header, HeaderMapExt};
use http::{HeaderName, HeaderValue, Method, Request, Response, StatusCode, Uri};
use pyo3::{
    exceptions::PyValueError,
    types::{PyBytes, PySequence, PyTuple},
    PyAny, PyResult,
};

use crate::errors::SynapseError;

/// Read a file-like Python object by chunks
///
/// # Errors
///
/// Returns an error if calling the `read` on the Python object failed
fn read_io_body(body: &PyAny, chunk_size: usize) -> PyResult<Bytes> {
    let mut buf = BytesMut::new();
    loop {
        let bytes: &PyBytes = body.call_method1("read", (chunk_size,))?.downcast()?;
        if bytes.as_bytes().is_empty() {
            return Ok(buf.into());
        }
        buf.put(bytes.as_bytes());
    }
}

/// Transform a Twisted `IRequest` to an [`http::Request`]
///
/// It uses the following members of `IRequest`:
/// - `content`, which is expected to be a file-like object with a `read` method
/// - `uri`, which is expected to be a valid URI as `bytes`
/// - `method`, which is expected to be a valid HTTP method as `bytes`
/// - `requestHeaders`, which is expected to have a `getAllRawHeaders` method
///
/// # Errors
///
/// Returns an error if the Python object doesn't properly implement `IRequest`
pub fn http_request_from_twisted(request: &PyAny) -> PyResult<Request<Bytes>> {
    let content = request.getattr("content")?;
    let body = read_io_body(content, 4096)?;

    let mut req = Request::new(body);

    let uri: &PyBytes = request.getattr("uri")?.downcast()?;
    *req.uri_mut() =
        Uri::try_from(uri.as_bytes()).map_err(|_| PyValueError::new_err("invalid uri"))?;

    let method: &PyBytes = request.getattr("method")?.downcast()?;
    *req.method_mut() = Method::from_bytes(method.as_bytes())
        .map_err(|_| PyValueError::new_err("invalid method"))?;

    let headers_iter = request
        .getattr("requestHeaders")?
        .call_method0("getAllRawHeaders")?
        .iter()?;

    for header in headers_iter {
        let header = header?;
        let header: &PyTuple = header.downcast()?;
        let name: &PyBytes = header.get_item(0)?.downcast()?;
        let name = HeaderName::from_bytes(name.as_bytes())
            .map_err(|_| PyValueError::new_err("invalid header name"))?;

        let values: &PySequence = header.get_item(1)?.downcast()?;
        for index in 0..values.len()? {
            let value: &PyBytes = values.get_item(index)?.downcast()?;
            let value = HeaderValue::from_bytes(value.as_bytes())
                .map_err(|_| PyValueError::new_err("invalid header value"))?;
            req.headers_mut().append(name.clone(), value);
        }
    }

    Ok(req)
}

/// Send an [`http::Response`] through a Twisted `IRequest`
///
/// It uses the following members of `IRequest`:
///
/// - `responseHeaders`, which is expected to have a `addRawHeader(bytes, bytes)` method
/// - `setResponseCode(int)` method
/// - `write(bytes)` method
/// - `finish()` method
///
/// # Errors
///
/// Returns an error if the Python object doesn't properly implement `IRequest`
pub fn http_response_to_twisted<B>(request: &PyAny, response: Response<B>) -> PyResult<()>
where
    B: Buf,
{
    let (parts, mut body) = response.into_parts();

    request.call_method1("setResponseCode", (parts.status.as_u16(),))?;

    let response_headers = request.getattr("responseHeaders")?;
    for (name, value) in parts.headers.iter() {
        response_headers.call_method1("addRawHeader", (name.as_str(), value.as_bytes()))?;
    }

    while body.remaining() != 0 {
        let chunk = body.chunk();
        request.call_method1("write", (chunk,))?;
        body.advance(chunk.len());
    }

    request.call_method0("finish")?;

    Ok(())
}

/// An extension trait for [`HeaderMap`] that provides typed access to headers, and throws the
/// right python exceptions when the header is missing or fails to parse.
///
/// [`HeaderMap`]: headers::HeaderMap
pub trait HeaderMapPyExt: HeaderMapExt {
    /// Get a header from the map, returning an error if it is missing or invalid.
    fn typed_get_required<H>(&self) -> PyResult<H>
    where
        H: Header,
    {
        self.typed_get_optional::<H>()?.ok_or_else(|| {
            SynapseError::new(
                StatusCode::BAD_REQUEST,
                format!("Missing required header: {}", H::name()),
                "M_MISSING_PARAM",
                None,
                None,
            )
        })
    }

    /// Get a header from the map, returning `None` if it is missing and an error if it is invalid.
    fn typed_get_optional<H>(&self) -> PyResult<Option<H>>
    where
        H: Header,
    {
        self.typed_try_get::<H>().map_err(|_| {
            SynapseError::new(
                StatusCode::BAD_REQUEST,
                format!("Invalid header: {}", H::name()),
                "M_INVALID_PARAM",
                None,
                None,
            )
        })
    }
}

impl<T: HeaderMapExt> HeaderMapPyExt for T {}
@@ -3,8 +3,11 @@ use pyo3::prelude::*;
use pyo3_log::ResetHandle;

pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
pub mod push;
pub mod rendezvous;

lazy_static! {
    static ref LOGGING_HANDLE: ResetHandle = pyo3_log::init();
@@ -43,6 +46,7 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    acl::register_module(py, m)?;
    push::register_module(py, m)?;
    events::register_module(py, m)?;
    rendezvous::register_module(py, m)?;

    Ok(())
}
@@ -304,12 +304,12 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
        rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Cow::Borrowed("m.room.message"),
                pattern: Cow::Borrowed("m.room.encrypted"),
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
@@ -320,12 +320,12 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
        rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Cow::Borrowed("m.room.encrypted"),
                pattern: Cow::Borrowed("m.room.message"),
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
rust/src/rendezvous/mod.rs (315 lines; new file)
@@ -0,0 +1,315 @@
|
||||
/*
|
||||
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 *
 */

use std::{
    collections::{BTreeMap, HashMap},
    time::{Duration, SystemTime},
};

use bytes::Bytes;
use headers::{
    AccessControlAllowOrigin, AccessControlExposeHeaders, CacheControl, ContentLength, ContentType,
    HeaderMapExt, IfMatch, IfNoneMatch, Pragma,
};
use http::{header::ETAG, HeaderMap, Response, StatusCode, Uri};
use mime::Mime;
use pyo3::{
    exceptions::PyValueError, pyclass, pymethods, types::PyModule, Py, PyAny, PyObject, PyResult,
    Python, ToPyObject,
};
use ulid::Ulid;

use self::session::Session;
use crate::{
    errors::{NotFoundError, SynapseError},
    http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt},
};

mod session;

// n.b. Because OPTIONS requests are handled by the Python code, we don't need to set Access-Control-Allow-Headers.
fn prepare_headers(headers: &mut HeaderMap, session: &Session) {
    headers.typed_insert(AccessControlAllowOrigin::ANY);
    headers.typed_insert(AccessControlExposeHeaders::from_iter([ETAG]));
    headers.typed_insert(Pragma::no_cache());
    headers.typed_insert(CacheControl::new().with_no_store());
    headers.typed_insert(session.etag());
    headers.typed_insert(session.expires());
    headers.typed_insert(session.last_modified());
}

#[pyclass]
struct RendezvousHandler {
    base: Uri,
    clock: PyObject,
    sessions: BTreeMap<Ulid, Session>,
    capacity: usize,
    max_content_length: u64,
    ttl: Duration,
}

impl RendezvousHandler {
    /// Check the input headers of a request which sets data for a session, and return the content type.
    fn check_input_headers(&self, headers: &HeaderMap) -> PyResult<Mime> {
        let ContentLength(content_length) = headers.typed_get_required()?;

        if content_length > self.max_content_length {
            return Err(SynapseError::new(
                StatusCode::PAYLOAD_TOO_LARGE,
                "Payload too large".to_owned(),
                "M_TOO_LARGE",
                None,
                None,
            ));
        }

        let content_type: ContentType = headers.typed_get_required()?;

        // Content-Type must be text/plain
        if content_type != ContentType::text() {
            return Err(SynapseError::new(
                StatusCode::BAD_REQUEST,
                "Content-Type must be text/plain".to_owned(),
                "M_INVALID_PARAM",
                None,
                None,
            ));
        }

        Ok(content_type.into())
    }

    /// Evict expired sessions and remove the oldest sessions until we're under the capacity.
    fn evict(&mut self, now: SystemTime) {
        // First remove all the entries which expired
        self.sessions.retain(|_, session| !session.expired(now));

        // Then we remove the oldest entries until we're under the limit
        while self.sessions.len() > self.capacity {
            self.sessions.pop_first();
        }
    }
}
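The eviction policy above is worth spelling out: expired entries go first, and because the map is keyed by ULID (which sorts by creation time), trimming from the front always removes the oldest sessions. A minimal Python sketch of the same two-phase policy, assuming an insertion-ordered mapping stands in for the `BTreeMap<Ulid, Session>`; the `Session` type here is illustrative, not Synapse's:

```python
from collections import OrderedDict
from dataclasses import dataclass

@dataclass
class Session:
    expires: float  # absolute expiry time, in seconds

def evict(sessions: "OrderedDict[str, Session]", now: float, capacity: int) -> None:
    # Phase 1: drop every session that has already expired.
    for key in [k for k, s in sessions.items() if s.expires <= now]:
        del sessions[key]
    # Phase 2: drop the oldest entries until we are back under capacity.
    # Time-sortable keys mean "first inserted" is also "oldest".
    while len(sessions) > capacity:
        sessions.popitem(last=False)
```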

#[pymethods]
impl RendezvousHandler {
    #[new]
    #[pyo3(signature = (homeserver, /, capacity=100, max_content_length=4*1024, eviction_interval=60*1000, ttl=60*1000))]
    fn new(
        py: Python<'_>,
        homeserver: &PyAny,
        capacity: usize,
        max_content_length: u64,
        eviction_interval: u64,
        ttl: u64,
    ) -> PyResult<Py<Self>> {
        let base: String = homeserver
            .getattr("config")?
            .getattr("server")?
            .getattr("public_baseurl")?
            .extract()?;
        let base = Uri::try_from(format!("{base}_synapse/client/rendezvous"))
            .map_err(|_| PyValueError::new_err("Invalid base URI"))?;

        let clock = homeserver.call_method0("get_clock")?.to_object(py);

        // Construct a Python object so that we can get a reference to the
        // evict method and schedule it to run.
        let self_ = Py::new(
            py,
            Self {
                base,
                clock,
                sessions: BTreeMap::new(),
                capacity,
                max_content_length,
                ttl: Duration::from_millis(ttl),
            },
        )?;

        let evict = self_.getattr(py, "_evict")?;
        homeserver.call_method0("get_clock")?.call_method(
            "looping_call",
            (evict, eviction_interval),
            None,
        )?;

        Ok(self_)
    }

    fn _evict(&mut self, py: Python<'_>) -> PyResult<()> {
        let clock = self.clock.as_ref(py);
        let now: u64 = clock.call_method0("time_msec")?.extract()?;
        let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);
        self.evict(now);

        Ok(())
    }

    fn handle_post(&mut self, py: Python<'_>, twisted_request: &PyAny) -> PyResult<()> {
        let request = http_request_from_twisted(twisted_request)?;

        let content_type = self.check_input_headers(request.headers())?;

        let clock = self.clock.as_ref(py);
        let now: u64 = clock.call_method0("time_msec")?.extract()?;
        let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);

        // We trigger an immediate eviction if we're at 2x the capacity
        if self.sessions.len() >= self.capacity * 2 {
            self.evict(now);
        }

        // Generate a new ULID for the session from the current time.
        let id = Ulid::from_datetime(now);

        let uri = format!("{base}/{id}", base = self.base);

        let body = request.into_body();

        let session = Session::new(body, content_type, now, self.ttl);

        let response = serde_json::json!({
            "url": uri,
        })
        .to_string();

        let mut response = Response::new(response.as_bytes());
        *response.status_mut() = StatusCode::CREATED;
        response.headers_mut().typed_insert(ContentType::json());
        prepare_headers(response.headers_mut(), &session);
        http_response_to_twisted(twisted_request, response)?;

        self.sessions.insert(id, session);

        Ok(())
    }

    fn handle_get(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
        let request = http_request_from_twisted(twisted_request)?;

        let if_none_match: Option<IfNoneMatch> = request.headers().typed_get_optional()?;

        let now: u64 = self.clock.call_method0(py, "time_msec")?.extract(py)?;
        let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);

        let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
        let session = self
            .sessions
            .get(&id)
            .filter(|s| !s.expired(now))
            .ok_or_else(NotFoundError::new)?;

        if let Some(if_none_match) = if_none_match {
            if !if_none_match.precondition_passes(&session.etag()) {
                let mut response = Response::new(Bytes::new());
                *response.status_mut() = StatusCode::NOT_MODIFIED;
                prepare_headers(response.headers_mut(), session);
                http_response_to_twisted(twisted_request, response)?;
                return Ok(());
            }
        }

        let mut response = Response::new(session.data());
        *response.status_mut() = StatusCode::OK;
        let headers = response.headers_mut();
        prepare_headers(headers, session);
        headers.typed_insert(session.content_type());
        headers.typed_insert(session.content_length());
        http_response_to_twisted(twisted_request, response)?;

        Ok(())
    }

    fn handle_put(&mut self, py: Python<'_>, twisted_request: &PyAny, id: &str) -> PyResult<()> {
        let request = http_request_from_twisted(twisted_request)?;

        let content_type = self.check_input_headers(request.headers())?;

        let if_match: IfMatch = request.headers().typed_get_required()?;

        let data = request.into_body();

        let now: u64 = self.clock.call_method0(py, "time_msec")?.extract(py)?;
        let now = SystemTime::UNIX_EPOCH + Duration::from_millis(now);

        let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
        let session = self
            .sessions
            .get_mut(&id)
            .filter(|s| !s.expired(now))
            .ok_or_else(NotFoundError::new)?;

        if !if_match.precondition_passes(&session.etag()) {
            let mut headers = HeaderMap::new();
            prepare_headers(&mut headers, session);

            let mut additional_fields = HashMap::with_capacity(1);
            additional_fields.insert(
                String::from("org.matrix.msc4108.errcode"),
                String::from("M_CONCURRENT_WRITE"),
            );

            return Err(SynapseError::new(
                StatusCode::PRECONDITION_FAILED,
                "ETag does not match".to_owned(),
                "M_UNKNOWN", // Would be M_CONCURRENT_WRITE
                Some(additional_fields),
                Some(headers),
            ));
        }

        session.update(data, content_type, now);

        let mut response = Response::new(Bytes::new());
        *response.status_mut() = StatusCode::ACCEPTED;
        prepare_headers(response.headers_mut(), session);
        http_response_to_twisted(twisted_request, response)?;

        Ok(())
    }

    fn handle_delete(&mut self, twisted_request: &PyAny, id: &str) -> PyResult<()> {
        let _request = http_request_from_twisted(twisted_request)?;

        let id: Ulid = id.parse().map_err(|_| NotFoundError::new())?;
        let _session = self.sessions.remove(&id).ok_or_else(NotFoundError::new)?;

        let mut response = Response::new(Bytes::new());
        *response.status_mut() = StatusCode::NO_CONTENT;
        response
            .headers_mut()
            .typed_insert(AccessControlAllowOrigin::ANY);
        http_response_to_twisted(twisted_request, response)?;

        Ok(())
    }
}

pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    let child_module = PyModule::new(py, "rendezvous")?;

    child_module.add_class::<RendezvousHandler>()?;

    m.add_submodule(child_module)?;

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import rendezvous` work.
    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.rendezvous", child_module)?;

    Ok(())
}
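A sketch of how the Python side can consume this module; the import path follows from the `sys.modules` registration above, and the keyword defaults from the `#[pyo3(signature = ...)]` attribute. The `hs` homeserver object is assumed to be in scope:

```python
# Illustrative only: construct the Rust-backed handler from Python, relying on
# the defaults declared in the pyo3 signature above.
from synapse.synapse_rust import rendezvous

handler = rendezvous.RendezvousHandler(
    hs,                           # a synapse.server.HomeServer (assumed in scope)
    capacity=100,                 # max concurrent sessions before trimming
    max_content_length=4 * 1024,  # bytes
    eviction_interval=60 * 1000,  # milliseconds between _evict runs
    ttl=60 * 1000,                # session lifetime in milliseconds
)
```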

rust/src/rendezvous/session.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
/*
 * This file is licensed under the Affero General Public License (AGPL) version 3.
 *
 * Copyright (C) 2024 New Vector, Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * See the GNU Affero General Public License for more details:
 * <https://www.gnu.org/licenses/agpl-3.0.html>.
 */

use std::time::{Duration, SystemTime};

use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
use bytes::Bytes;
use headers::{ContentLength, ContentType, ETag, Expires, LastModified};
use mime::Mime;
use sha2::{Digest, Sha256};

/// A single session, containing data, metadata, and expiry information.
pub struct Session {
    hash: [u8; 32],
    data: Bytes,
    content_type: Mime,
    last_modified: SystemTime,
    expires: SystemTime,
}

impl Session {
    /// Create a new session with the given data, content type, and time-to-live.
    pub fn new(data: Bytes, content_type: Mime, now: SystemTime, ttl: Duration) -> Self {
        let hash = Sha256::digest(&data).into();
        Self {
            hash,
            data,
            content_type,
            expires: now + ttl,
            last_modified: now,
        }
    }

    /// Returns true if the session has expired at the given time.
    pub fn expired(&self, now: SystemTime) -> bool {
        self.expires <= now
    }

    /// Update the session with new data, content type, and last modified time.
    pub fn update(&mut self, data: Bytes, content_type: Mime, now: SystemTime) {
        self.hash = Sha256::digest(&data).into();
        self.data = data;
        self.content_type = content_type;
        self.last_modified = now;
    }

    /// Returns the Content-Type header of the session.
    pub fn content_type(&self) -> ContentType {
        self.content_type.clone().into()
    }

    /// Returns the Content-Length header of the session.
    pub fn content_length(&self) -> ContentLength {
        ContentLength(self.data.len() as _)
    }

    /// Returns the ETag header of the session.
    pub fn etag(&self) -> ETag {
        let encoded = URL_SAFE_NO_PAD.encode(self.hash);
        // SAFETY: Base64 encoding is URL-safe, so ETag-safe
        format!("\"{encoded}\"")
            .parse()
            .expect("base64-encoded hash should be URL-safe")
    }

    /// Returns the Last-Modified header of the session.
    pub fn last_modified(&self) -> LastModified {
        self.last_modified.into()
    }

    /// Returns the Expires header of the session.
    pub fn expires(&self) -> Expires {
        self.expires.into()
    }

    /// Returns the current data stored in the session.
    pub fn data(&self) -> Bytes {
        self.data.clone()
    }
}
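The ETag scheme is easy to reproduce when testing against this endpoint: SHA-256 of the body, base64url-encoded without padding, wrapped in quotes. A minimal standard-library sketch:

```python
import base64
import hashlib

def session_etag(data: bytes) -> str:
    # SHA-256 of the payload, URL-safe base64 without padding, quoted --
    # mirroring Session::etag() above.
    digest = hashlib.sha256(data).digest()
    encoded = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f'"{encoded}"'

# e.g. session_etag(b"") == '"47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"'
```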

@@ -214,7 +214,17 @@ fi

extra_test_args=()

test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902"
test_packages=(
    ./tests/csapi
    ./tests
    ./tests/msc3874
    ./tests/msc3890
    ./tests/msc3391
    ./tests/msc3930
    ./tests/msc3902
    ./tests/msc3967
    ./tests/msc4115
)

# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
@@ -278,7 +288,7 @@ fi
export PASS_SYNAPSE_LOG_TESTING=1

# Run the tests!
echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages"
echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}"
cd "$COMPLEMENT_DIR"

go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}"

@@ -91,7 +91,6 @@ else
    "synapse" "docker" "tests"
    "scripts-dev"
    "contrib" "synmark" "stubs" ".ci"
    "dev-docs"
  )
fi
fi

@@ -660,7 +660,7 @@ def _announce() -> None:
Hi everyone. Synapse {current_version} has just been released.

[notes](https://github.com/element-hq/synapse/releases/tag/{tag_name}) | \
[docker](https://hub.docker.com/r/vectorim/synapse/tags?name={tag_name}) | \
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
    )

@@ -60,7 +60,7 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main import FilteringWorkerStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
@@ -77,10 +77,8 @@ from synapse.storage.databases.main.media_repository import (
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.pusher import (
    PusherBackgroundUpdatesStore,
    PusherWorkerStore,
)
from synapse.storage.databases.main.push_rule import PusherWorkerStore
from synapse.storage.databases.main.pusher import PusherBackgroundUpdatesStore
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
@@ -129,7 +127,7 @@ BOOLEAN_COLUMNS = {
    "redactions": ["have_censored"],
    "room_stats_state": ["is_federatable"],
    "rooms": ["is_public", "has_auth_chain_index"],
    "users": ["shadow_banned", "approved", "locked"],
    "users": ["shadow_banned", "approved", "locked", "suspended"],
    "un_partial_stated_event_stream": ["rejection_status_changed"],
    "users_who_share_rooms": ["share_private"],
    "per_user_experimental_features": ["enabled"],

@@ -245,7 +243,6 @@ class Store(
    AccountDataWorkerStore,
    FilteringWorkerStore,
    ProfileWorkerStore,
    PushRuleStore,
    PusherWorkerStore,
    PusherBackgroundUpdatesStore,
    PresenceBackgroundUpdateStore,
@@ -1040,10 +1037,10 @@ class Porter:
        return done, remaining + done

    async def _setup_state_group_id_seq(self) -> None:
        curr_id: Optional[
            int
        ] = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
        curr_id: Optional[int] = (
            await self.sqlite_store.db_pool.simple_select_one_onecol(
                table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
            )
        )

        if not curr_id:
@@ -1132,13 +1129,13 @@ class Porter:
        )

    async def _setup_auth_chain_sequence(self) -> None:
        curr_chain_id: Optional[
            int
        ] = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="event_auth_chains",
            keyvalues={},
            retcol="MAX(chain_id)",
            allow_none=True,
        curr_chain_id: Optional[int] = (
            await self.sqlite_store.db_pool.simple_select_one_onecol(
                table="event_auth_chains",
                keyvalues={},
                retcol="MAX(chain_id)",
                allow_none=True,
            )
        )

        def r(txn: LoggingTransaction) -> None:

@@ -43,7 +43,6 @@ MAIN_TIMELINE: Final = "main"


class Membership:

    """Represents the membership states of a user in a room."""

    INVITE: Final = "invite"
@@ -130,6 +129,8 @@ class EventTypes:

    Reaction: Final = "m.reaction"

    CallInvite: Final = "m.call.invite"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"
@@ -233,6 +234,13 @@ class EventContentFields:
    TO_DEVICE_MSGID: Final = "org.matrix.msgid"


class EventUnsignedContentFields:
    """Fields found inside the 'unsigned' data on events"""

    # Requesting user's membership, per MSC4115
    MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"


class RoomTypes:
    """Understood values of the room_type field of m.room.create events."""


@@ -517,8 +517,6 @@ class InvalidCaptchaError(SynapseError):
class LimitExceededError(SynapseError):
    """A client has sent too many requests and is being throttled."""

    include_retry_after_header = False

    def __init__(
        self,
        limiter_name: str,
@@ -526,9 +524,10 @@ class LimitExceededError(SynapseError):
        retry_after_ms: Optional[int] = None,
        errcode: str = Codes.LIMIT_EXCEEDED,
    ):
        # Use HTTP header Retry-After to enable library-assisted retry handling.
        headers = (
            {"Retry-After": str(math.ceil(retry_after_ms / 1000))}
            if self.include_retry_after_header and retry_after_ms is not None
            if retry_after_ms is not None
            else None
        )
        super().__init__(code, "Too Many Requests", errcode, headers=headers)
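Note the ceiling division: milliseconds round up to whole seconds, so a client honouring the header never retries early. A quick worked example:

```python
import math

# Mirrors the header computation above: 4300 ms becomes a 5 second wait,
# never 4.
retry_after_ms = 4_300
headers = {"Retry-After": str(math.ceil(retry_after_ms / 1000))}
assert headers == {"Retry-After": "5"}
```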

@@ -370,9 +370,11 @@ class RoomVersionCapability:

MSC3244_CAPABILITIES = {
    cap.identifier: {
        "preferred": cap.preferred_version.identifier
        if cap.preferred_version is not None
        else None,
        "preferred": (
            cap.preferred_version.identifier
            if cap.preferred_version is not None
            else None
        ),
        "support": [
            v.identifier
            for v in KNOWN_ROOM_VERSIONS.values()

@@ -188,9 +188,9 @@ class SynapseHomeServer(HomeServer):
                PasswordResetSubmitTokenResource,
            )

            resources[
                "/_synapse/client/password_reset/email/submit_token"
            ] = PasswordResetSubmitTokenResource(self)
            resources["/_synapse/client/password_reset/email/submit_token"] = (
                PasswordResetSubmitTokenResource(self)
            )

        if name == "consent":
            from synapse.rest.consent.consent_resource import ConsentResource

@@ -362,16 +362,16 @@ class ApplicationServiceApi(SimpleHttpClient):
        # TODO: Update to stable prefixes once MSC3202 completes FCP merge
        if service.msc3202_transaction_extensions:
            if one_time_keys_count:
                body[
                    "org.matrix.msc3202.device_one_time_key_counts"
                ] = one_time_keys_count
                body[
                    "org.matrix.msc3202.device_one_time_keys_count"
                ] = one_time_keys_count
                body["org.matrix.msc3202.device_one_time_key_counts"] = (
                    one_time_keys_count
                )
                body["org.matrix.msc3202.device_one_time_keys_count"] = (
                    one_time_keys_count
                )
            if unused_fallback_keys:
                body[
                    "org.matrix.msc3202.device_unused_fallback_key_types"
                ] = unused_fallback_keys
                body["org.matrix.msc3202.device_unused_fallback_key_types"] = (
                    unused_fallback_keys
                )
            if device_list_summary:
                body["org.matrix.msc3202.device_lists"] = {
                    "changed": list(device_list_summary.changed),

@@ -52,6 +52,7 @@ DEFAULT_SUBJECTS = {
    "invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...",
    "password_reset": "[%(server_name)s] Password reset",
    "email_validation": "[%(server_name)s] Validate your email",
    "email_already_in_use": "[%(server_name)s] Email already in use",
}

LEGACY_TEMPLATE_DIR_WARNING = """
@@ -76,6 +77,7 @@ class EmailSubjectConfig:
    invite_from_person_to_space: str
    password_reset: str
    email_validation: str
    email_already_in_use: str


class EmailConfig(Config):
@@ -180,6 +182,12 @@ class EmailConfig(Config):
        registration_template_text = email_config.get(
            "registration_template_text", "registration.txt"
        )
        already_in_use_template_html = email_config.get(
            "already_in_use_template_html", "already_in_use.html"
        )
        already_in_use_template_text = email_config.get(
            "already_in_use_template_text", "already_in_use.txt"
        )
        add_threepid_template_html = email_config.get(
            "add_threepid_template_html", "add_threepid.html"
        )
@@ -215,6 +223,8 @@
            self.email_password_reset_template_text,
            self.email_registration_template_html,
            self.email_registration_template_text,
            self.email_already_in_use_template_html,
            self.email_already_in_use_template_text,
            self.email_add_threepid_template_html,
            self.email_add_threepid_template_text,
            self.email_password_reset_template_confirmation_html,
@@ -230,6 +240,8 @@
            password_reset_template_text,
            registration_template_html,
            registration_template_text,
            already_in_use_template_html,
            already_in_use_template_text,
            add_threepid_template_html,
            add_threepid_template_text,
            "password_reset_confirmation.html",

@@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Any, Optional
import attr
import attr.validators

from synapse.api.errors import LimitExceededError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
from synapse.config._base import Config, RootConfig
@@ -394,11 +393,6 @@ class ExperimentalConfig(Config):
        # MSC3967: Do not require UIA when first uploading cross signing keys
        self.msc3967_enabled = experimental.get("msc3967_enabled", False)

        # MSC3981: Recurse relations
        self.msc3981_recurse_relations = experimental.get(
            "msc3981_recurse_relations", False
        )

        # MSC3861: Matrix architecture change to delegate authentication via OIDC
        try:
            self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -410,19 +404,6 @@ class ExperimentalConfig(Config):
        # Check that none of the other config options conflict with MSC3861 when enabled
        self.msc3861.check_config_conflicts(self.root)

        # MSC4010: Do not allow setting m.push_rules account data.
        self.msc4010_push_rules_account_data = experimental.get(
            "msc4010_push_rules_account_data", False
        )

        # MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
        #
        # This is a bit hacky, but the most reasonable way to *always* include the
        # headers.
        LimitExceededError.include_retry_after_header = experimental.get(
            "msc4041_enabled", False
        )

        self.msc4028_push_encrypted_events = experimental.get(
            "msc4028_push_encrypted_events", False
        )
@@ -430,3 +411,28 @@
        self.msc4069_profile_inhibit_propagation = experimental.get(
            "msc4069_profile_inhibit_propagation", False
        )

        # MSC4108: Mechanism to allow OIDC sign in and E2EE set up via QR code
        self.msc4108_enabled = experimental.get("msc4108_enabled", False)

        self.msc4108_delegation_endpoint: Optional[str] = experimental.get(
            "msc4108_delegation_endpoint", None
        )

        if (
            self.msc4108_enabled or self.msc4108_delegation_endpoint is not None
        ) and not self.msc3861.enabled:
            raise ConfigError(
                "MSC4108 requires MSC3861 to be enabled",
                ("experimental", "msc4108_delegation_endpoint"),
            )

        if self.msc4108_delegation_endpoint is not None and self.msc4108_enabled:
            raise ConfigError(
                "You cannot have MSC4108 both enabled and delegated at the same time",
                ("experimental", "msc4108_delegation_endpoint"),
            )

        self.msc4115_membership_on_events = experimental.get(
            "msc4115_membership_on_events", False
        )

@@ -342,6 +342,9 @@ def _parse_oidc_config_dict(
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
        enable_registration=oidc_config.get("enable_registration", True),
        additional_authorization_parameters=oidc_config.get(
            "additional_authorization_parameters", {}
        ),
    )


@@ -444,3 +447,6 @@ class OidcProviderConfig:

    # Whether automatic registrations are enabled in the OIDC flow. Defaults to True
    enable_registration: bool

    # Additional parameters that will be passed to the authorization grant URL
    additional_authorization_parameters: Mapping[str, str]

@@ -171,9 +171,9 @@ class RegistrationConfig(Config):
            refreshable_access_token_lifetime = self.parse_duration(
                refreshable_access_token_lifetime
            )
        self.refreshable_access_token_lifetime: Optional[
            int
        ] = refreshable_access_token_lifetime
        self.refreshable_access_token_lifetime: Optional[int] = (
            refreshable_access_token_lifetime
        )

        if (
            self.session_lifetime is not None
@@ -237,6 +237,14 @@ class RegistrationConfig(Config):

        self.inhibit_user_in_use_error = config.get("inhibit_user_in_use_error", False)

        # List of user IDs not to send out device list updates for when they
        # register new devices. This is useful to handle bot accounts.
        #
        # Note: This will still send out device list updates if the device is
        # later updated, e.g. end to end keys are added.
        dont_notify_new_devices_for = config.get("dont_notify_new_devices_for", [])
        self.dont_notify_new_devices_for = frozenset(dont_notify_new_devices_for)

    def generate_config_section(
        self, generate_secrets: bool = False, **kwargs: Any
    ) -> str:

@@ -199,9 +199,9 @@ class ContentRepositoryConfig(Config):
                provider_config["module"] == "file_system"
                or provider_config["module"] == "synapse.rest.media.v1.storage_provider"
            ):
                provider_config[
                    "module"
                ] = "synapse.media.storage_provider.FileStorageProviderBackend"
                provider_config["module"] = (
                    "synapse.media.storage_provider.FileStorageProviderBackend"
                )

            provider_class, parsed_config = load_module(
                provider_config, ("media_storage_providers", "<item %i>" % i)

@@ -156,6 +156,8 @@ class WriterLocations:
            can only be a single instance.
        presence: The instances that write to the presence stream. Currently
            can only be a single instance.
        push_rules: The instances that write to the push stream. Currently
            can only be a single instance.
    """

    events: List[str] = attr.ib(
@@ -182,6 +184,10 @@ class WriterLocations:
        default=["master"],
        converter=_instance_to_list_converter,
    )
    push_rules: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )


@attr.s(auto_attribs=True)
@@ -341,6 +347,7 @@ class WorkerConfig(Config):
            "account_data",
            "receipts",
            "presence",
            "push_rules",
        ):
            instances = _instance_to_list_converter(getattr(self.writers, stream))
            for instance in instances:
@@ -378,6 +385,11 @@ class WorkerConfig(Config):
                "Must only specify one instance to handle `presence` messages."
            )

        if len(self.writers.push_rules) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `push` messages."
            )

        self.events_shard_config = RoutableShardedWorkerHandlingConfig(
            self.writers.events
        )

@@ -839,11 +839,12 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
            Map from server_name -> key_id -> FetchKeyResult
        """

        # We only need to do one request per server.
        servers_to_fetch = {k.server_name for k in keys_to_fetch}

        results = {}

        async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
            server_name = key_to_fetch_item.server_name

        async def get_keys(server_name: str) -> None:
            try:
                keys = await self.get_server_verify_keys_v2_direct(server_name)
                results[server_name] = keys
@@ -852,7 +853,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
            except Exception:
                logger.exception("Error getting keys from %s", server_name)

        await yieldable_gather_results(get_keys, keys_to_fetch)
        await yieldable_gather_results(get_keys, servers_to_fetch)
        return results

    async def get_server_verify_keys_v2_direct(

@@ -23,7 +23,20 @@
import collections.abc
import logging
import typing
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
from typing import (
    Any,
    ChainMap,
    Dict,
    Iterable,
    List,
    Mapping,
    MutableMapping,
    Optional,
    Set,
    Tuple,
    Union,
    cast,
)

from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -75,8 +88,7 @@ class _EventSourceStore(Protocol):
        redact_behaviour: EventRedactBehaviour,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, "EventBase"]:
        ...
    ) -> Dict[str, "EventBase"]: ...


def validate_event_for_room_version(event: "EventBase") -> None:
@@ -175,12 +187,22 @@ async def check_state_independent_auth_rules(
        return

    # 2. Reject if event has auth_events that: ...
    auth_events: ChainMap[str, EventBase] = ChainMap()
    if batched_auth_events:
        # Copy the batched auth events to avoid mutating them.
        auth_events = dict(batched_auth_events)
        needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys()
        # batched_auth_events can become very large. To avoid repeatedly copying it, which
        # would significantly impact performance, we use a ChainMap.
        # batched_auth_events must be cast to MutableMapping because .new_child() requires
        # this type. This casting is safe as the mapping is never mutated.
        auth_events = auth_events.new_child(
            cast(MutableMapping[str, "EventBase"], batched_auth_events)
        )
        needed_auth_event_ids = [
            event_id
            for event_id in event.auth_event_ids()
            if event_id not in batched_auth_events
        ]
        if needed_auth_event_ids:
            auth_events.update(
            auth_events = auth_events.new_child(
                await store.get_events(
                    needed_auth_event_ids,
                    redact_behaviour=EventRedactBehaviour.as_is,
@@ -188,10 +210,12 @@ async def check_state_independent_auth_rules(
                )
            )
    else:
        auth_events = await store.get_events(
            event.auth_event_ids(),
            redact_behaviour=EventRedactBehaviour.as_is,
            allow_rejected=True,
        auth_events = auth_events.new_child(
            await store.get_events(
                event.auth_event_ids(),
                redact_behaviour=EventRedactBehaviour.as_is,
                allow_rejected=True,
            )
        )

    room_id = event.room_id
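The switch to `ChainMap` avoids copying `batched_auth_events`, which can be very large: `new_child` layers a small overlay in front of the existing mapping in O(1), and lookups fall through the layers. A standalone sketch of the semantics being relied on (plain strings stand in for EventBase objects):

```python
from collections import ChainMap

batched = {"$event_a": "event A", "$event_b": "event B"}  # the large mapping

auth_events: "ChainMap[str, str]" = ChainMap()
auth_events = auth_events.new_child(batched)                  # O(1): no copy of `batched`
auth_events = auth_events.new_child({"$event_c": "event C"})  # fetched-from-DB overlay

assert auth_events["$event_a"] == "event A"  # falls through to the base layer
assert auth_events["$event_c"] == "event C"  # served by the overlay
assert "$event_d" not in auth_events
```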

@@ -93,16 +93,14 @@ class DictProperty(Generic[T]):
        self,
        instance: Literal[None],
        owner: Optional[Type[_DictPropertyInstance]] = None,
    ) -> "DictProperty":
        ...
    ) -> "DictProperty": ...

    @overload
    def __get__(
        self,
        instance: _DictPropertyInstance,
        owner: Optional[Type[_DictPropertyInstance]] = None,
    ) -> T:
        ...
    ) -> T: ...

    def __get__(
        self,
@@ -161,16 +159,14 @@ class DefaultDictProperty(DictProperty, Generic[T]):
        self,
        instance: Literal[None],
        owner: Optional[Type[_DictPropertyInstance]] = None,
    ) -> "DefaultDictProperty":
        ...
    ) -> "DefaultDictProperty": ...

    @overload
    def __get__(
        self,
        instance: _DictPropertyInstance,
        owner: Optional[Type[_DictPropertyInstance]] = None,
    ) -> T:
        ...
    ) -> T: ...

    def __get__(
        self,

@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester

from . import EventBase
from . import EventBase, make_event_from_dict

if TYPE_CHECKING:
    from synapse.handlers.relations import BundledAggregations
@@ -82,17 +82,14 @@ def prune_event(event: EventBase) -> EventBase:
    """
    pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())

    from . import make_event_from_dict

    pruned_event = make_event_from_dict(
        pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
    )

    # copy the internal fields
    # Copy the bits of `internal_metadata` that aren't returned by `get_dict`
    pruned_event.internal_metadata.stream_ordering = (
        event.internal_metadata.stream_ordering
    )

    pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

    # Mark the event as redacted
@@ -101,6 +98,29 @@ def prune_event(event: EventBase) -> EventBase:
    return pruned_event


def clone_event(event: EventBase) -> EventBase:
    """Take a copy of the event.

    This is mostly useful because it does a *shallow* copy of the `unsigned` data,
    which means it can then be updated without corrupting the in-memory cache. Note that
    other properties of the event, such as `content`, are *not* (currently) copied here.
    """
    # XXX: We rely on at least one of `event.get_dict()` and `make_event_from_dict()`
    # making a copy of `unsigned`. Currently, both do, though I don't really know why.
    # Still, as long as they do, there's not much point doing yet another copy here.
    new_event = make_event_from_dict(
        event.get_dict(), event.room_version, event.internal_metadata.get_dict()
    )

    # Copy the bits of `internal_metadata` that aren't returned by `get_dict`.
    new_event.internal_metadata.stream_ordering = (
        event.internal_metadata.stream_ordering
    )
    new_event.internal_metadata.outlier = event.internal_metadata.outlier

    return new_event
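A sketch of the intended usage pattern, assuming a hypothetical caller; the point is to clone before touching `unsigned`, so cached copies of the event stay pristine:

```python
# Illustrative only: annotate an event's `unsigned` data without corrupting
# the cached original.
def annotate_membership(event, membership: str):
    annotated = clone_event(event)
    # Safe: `unsigned` was shallow-copied, so the cached `event` is untouched.
    annotated.unsigned["io.element.msc4115.membership"] = membership
    return annotated
```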


def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
    """Redacts the event_dict in the same way as `prune_event`, except it
    operates on dicts rather than event objects
@@ -612,9 +632,9 @@ class EventClientSerializer:
        serialized_aggregations = {}

        if event_aggregations.references:
            serialized_aggregations[
                RelationTypes.REFERENCE
            ] = event_aggregations.references
            serialized_aggregations[RelationTypes.REFERENCE] = (
                event_aggregations.references
            )

        if event_aggregations.replace:
            # Include information about it in the relations dict.

@@ -169,9 +169,9 @@ class FederationServer(FederationBase):

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache: ResponseCache[
            Tuple[str, Optional[str]]
        ] = ResponseCache(hs.get_clock(), "state_resp", timeout_ms=30000)
        self._state_resp_cache: ResponseCache[Tuple[str, Optional[str]]] = (
            ResponseCache(hs.get_clock(), "state_resp", timeout_ms=30000)
        )
        self._state_ids_resp_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
            hs.get_clock(), "state_ids_resp", timeout_ms=30000
        )
@@ -546,7 +546,25 @@ class FederationServer(FederationBase):
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)
            try:
                await self.registry.on_edu(edu.edu_type, origin, edu.content)
            except Exception:
                # If there was an error handling the EDU, we must reject the
                # transaction.
                #
                # Some EDU types (notably, to-device messages) are, despite their name,
                # expected to be reliable; if we weren't able to do something with it,
                # we have to tell the sender that, and the only way the protocol gives
                # us to do so is by sending an HTTP error back on the transaction.
                #
                # We log the exception now, and then raise a new SynapseError to cause
                # the transaction to be failed.
                logger.exception("Error handling EDU of type %s", edu.edu_type)
                raise SynapseError(500, f"Error handling EDU of type {edu.edu_type}")

        # TODO: if the first EDU fails, we should probably abort the whole
        # thing rather than carrying on with the rest of them. That would
        # probably be best done inside `concurrently_execute`.

        await concurrently_execute(
            _process_edu,
@@ -1414,12 +1432,7 @@ class FederationHandlerRegistry:
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
                await handler(origin, content)
            return

        # Check if we can route it somewhere else that isn't us
@@ -1428,17 +1441,12 @@ class FederationHandlerRegistry:
        # Pick an instance randomly so that we don't overload one.
        route_to = random.choice(instances)

        try:
            await self._send_edu(
                instance_name=route_to,
                edu_type=edu_type,
                origin=origin,
                content=content,
            )
        except SynapseError as e:
            logger.info("Failed to handle edu %r: %r", edu_type, e)
        except Exception:
            logger.exception("Failed to handle edu %r", edu_type)
        await self._send_edu(
            instance_name=route_to,
            edu_type=edu_type,
            origin=origin,
            content=content,
        )
        return

        # Oh well, let's just log and move on.
@@ -88,9 +88,9 @@ class FederationRemoteSendQueue(AbstractFederationSender):
        # Stores the destinations we need to explicitly send presence to about a
        # given user.
        # Stream position -> (user_id, destinations)
        self.presence_destinations: SortedDict[
            int, Tuple[str, Iterable[str]]
        ] = SortedDict()
        self.presence_destinations: SortedDict[int, Tuple[str, Iterable[str]]] = (
            SortedDict()
        )

        # (destination, key) -> EDU
        self.keyed_edu: Dict[Tuple[str, tuple], Edu] = {}

@@ -192,10 +192,9 @@ sent_pdus_destination_dist_total = Counter(
)

# Time (in s) to wait before trying to wake up destinations that have
# catch-up outstanding. This will also be the delay applied at startup
# before trying the same.
# catch-up outstanding.
# Please note that rate limiting still applies, so while the loop is
# executed every X seconds the destinations may not be wake up because
# executed every X seconds the destinations may not be woken up because
# they are being rate limited following previous attempt failures.
WAKEUP_RETRY_PERIOD_SEC = 60

@@ -428,18 +427,17 @@ class FederationSender(AbstractFederationSender):
            / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
        )

        self._external_cache = hs.get_external_cache()
        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)

        # Regularly wake up destinations that have outstanding PDUs to be caught up
        self.clock.looping_call(
        self.clock.looping_call_now(
            run_as_background_process,
            WAKEUP_RETRY_PERIOD_SEC * 1000.0,
            "wake_destinations_needing_catchup",
            self._wake_destinations_needing_catchup,
        )

        self._external_cache = hs.get_external_cache()

        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)

    def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
        """Get or create a PerDestinationQueue for the given destination


@@ -118,10 +118,10 @@ class AccountHandler:
        }

        if self._use_account_validity_in_account_status:
            status[
                "org.matrix.expired"
            ] = await self._account_validity_handler.is_user_expired(
                user_id.to_string()
            status["org.matrix.expired"] = (
                await self._account_validity_handler.is_user_expired(
                    user_id.to_string()
                )
            )

        return status

@@ -42,6 +42,7 @@ class AdminHandler:
        self._device_handler = hs.get_device_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._state_storage_controller = self._storage_controllers.state
        self._hs_config = hs.config
        self._msc3866_enabled = hs.config.experimental.msc3866.enabled

    async def get_whois(self, user: UserID) -> JsonMapping:
@@ -217,7 +218,10 @@ class AdminHandler:
        )

        events = await filter_events_for_client(
            self._storage_controllers, user_id, events
            self._storage_controllers,
            user_id,
            events,
            msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
        )

        writer.write_events(room_id, events)

@@ -2185,7 +2185,7 @@ class PasswordAuthProvider:
        # result is always the right type, but as it is 3rd party code it might not be

        if not isinstance(result, tuple) or len(result) != 2:
            logger.warning(
            logger.warning(  # type: ignore[unreachable]
                "Wrong type returned by module API callback %s: %s, expected"
                " Optional[Tuple[str, Optional[Callable]]]",
                callback,
@@ -2248,7 +2248,7 @@ class PasswordAuthProvider:
        # result is always the right type, but as it is 3rd party code it might not be

        if not isinstance(result, tuple) or len(result) != 2:
            logger.warning(
            logger.warning(  # type: ignore[unreachable]
                "Wrong type returned by module API callback %s: %s, expected"
                " Optional[Tuple[str, Optional[Callable]]]",
                callback,

@@ -18,9 +18,11 @@
# [This file includes modifications made by New Vector Limited]
#
#
import itertools
import logging
from typing import TYPE_CHECKING, Optional

from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -168,9 +170,9 @@ class DeactivateAccountHandler:
        # parts users from rooms (if it isn't already running)
        self._start_user_parting()

        # Reject all pending invites for the user, so that the user doesn't show up in the
        # "invited" section of rooms' members list.
        await self._reject_pending_invites_for_user(user_id)
        # Reject all pending invites and knocks for the user, so that the
        # user doesn't show up in the "invited" section of rooms' members list.
        await self._reject_pending_invites_and_knocks_for_user(user_id)

        # Remove all information on the user from the account_validity table.
        if self._account_validity_enabled:
@@ -194,34 +196,37 @@ class DeactivateAccountHandler:

        return identity_server_supports_unbinding

    async def _reject_pending_invites_for_user(self, user_id: str) -> None:
        """Reject pending invites addressed to a given user ID.
    async def _reject_pending_invites_and_knocks_for_user(self, user_id: str) -> None:
        """Reject pending invites and knocks addressed to a given user ID.

        Args:
            user_id: The user ID to reject pending invites for.
            user_id: The user ID to reject pending invites and knocks for.
        """
        user = UserID.from_string(user_id)
        pending_invites = await self.store.get_invited_rooms_for_local_user(user_id)
        pending_knocks = await self.store.get_knocked_at_rooms_for_local_user(user_id)

        for room in pending_invites:
        for room in itertools.chain(pending_invites, pending_knocks):
            try:
                await self._room_member_handler.update_membership(
                    create_requester(user, authenticated_entity=self._server_name),
                    user,
                    room.room_id,
                    "leave",
                    Membership.LEAVE,
                    ratelimit=False,
                    require_consent=False,
                )
                logger.info(
                    "Rejected invite for deactivated user %r in room %r",
                    "Rejected %r for deactivated user %r in room %r",
                    room.membership,
                    user_id,
                    room.room_id,
                )
            except Exception:
                logger.exception(
                    "Failed to reject invite for user %r in room %r:"
                    "Failed to reject %r for user %r in room %r:"
                    " ignoring and continuing",
                    room.membership,
                    user_id,
                    room.room_id,
                )
@@ -256,11 +261,22 @@ class DeactivateAccountHandler:
        user = UserID.from_string(user_id)

        rooms_for_user = await self.store.get_rooms_for_user(user_id)
        requester = create_requester(user, authenticated_entity=self._server_name)
        should_erase = await self.store.is_user_erased(user_id)

        for room_id in rooms_for_user:
            logger.info("User parter parting %r from %r", user_id, room_id)
            try:
                # Before parting the user, redact all membership events if requested
                if should_erase:
                    event_ids = await self.store.get_membership_event_ids_for_user(
                        user_id, room_id
                    )
                    for event_id in event_ids:
                        await self.store.expire_event(event_id)

                await self._room_member_handler.update_membership(
                    create_requester(user, authenticated_entity=self._server_name),
                    requester,
                    user,
                    room_id,
                    "leave",
@@ -429,6 +429,10 @@ class DeviceHandler(DeviceWorkerHandler):
        self._storage_controllers = hs.get_storage_controllers()
        self.db_pool = hs.get_datastores().main.db_pool

        self._dont_notify_new_devices_for = (
            hs.config.registration.dont_notify_new_devices_for
        )

        self.device_list_updater = DeviceListUpdater(hs, self)

        federation_registry = hs.get_federation_registry()
@@ -505,6 +509,9 @@ class DeviceHandler(DeviceWorkerHandler):

        self._check_device_name_length(initial_device_display_name)

        # Check if we should send out device lists updates for this new device.
        notify = user_id not in self._dont_notify_new_devices_for

        if device_id is not None:
            new_device = await self.store.store_device(
                user_id=user_id,
@@ -514,7 +521,8 @@ class DeviceHandler(DeviceWorkerHandler):
                auth_provider_session_id=auth_provider_session_id,
            )
            if new_device:
                await self.notify_device_update(user_id, [device_id])
                if notify:
                    await self.notify_device_update(user_id, [device_id])
            return device_id

        # if the device id is not specified, we'll autogen one, but loop a few
@@ -530,7 +538,8 @@ class DeviceHandler(DeviceWorkerHandler):
                auth_provider_session_id=auth_provider_session_id,
            )
            if new_device:
                await self.notify_device_update(user_id, [new_device_id])
                if notify:
                    await self.notify_device_update(user_id, [new_device_id])
                return new_device_id
            attempts += 1


@@ -104,6 +104,9 @@ class DeviceMessageHandler:
        """
        Handle receiving to-device messages from remote homeservers.

        Note that any errors thrown from this method will cause the federation /send
        request to receive an error response.

        Args:
            origin: The remote homeserver.
            content: The JSON dictionary containing the to-device messages.

@@ -265,9 +265,9 @@ class DirectoryHandler:
    async def get_association(self, room_alias: RoomAlias) -> JsonDict:
        room_id = None
        if self.hs.is_mine(room_alias):
            result: Optional[
                RoomAliasMapping
            ] = await self.get_association_from_room_alias(room_alias)
            result: Optional[RoomAliasMapping] = (
                await self.get_association_from_room_alias(room_alias)
            )

            if result:
                room_id = result.room_id

@@ -1476,6 +1476,42 @@ class E2eKeysHandler:
        else:
            return exists, self.clock.time_msec() < ts_replacable_without_uia_before

    async def has_different_keys(self, user_id: str, body: JsonDict) -> bool:
        """
        Check if a key provided in `body` differs from the same key stored in the DB. Returns
        true on the first difference. If a key exists in `body` but does not exist in the DB,
        returns True. If `body` has no keys, this always returns False.
        Note by 'key' we mean Matrix key rather than JSON key.

        The purpose of this function is to detect whether or not we need to apply UIA checks.
        We must apply UIA checks if any key in the database is being overwritten. If a key is
        being inserted for the first time, or if the key exactly matches what is in the database,
        then no UIA check needs to be performed.

        Args:
            user_id: The user who sent the `body`.
            body: The JSON request body from POST /keys/device_signing/upload
        Returns:
            True if any key in `body` has a different value in the database.
        """
        # Ensure that each key provided in the request body exactly matches the one we have stored.
        # The first time we see the DB having a different key to the matching request key, bail.
        # Note: we do not care if the DB has a key which the request does not specify, as we only
        # care about *replacements* or *insertions* (i.e. UPSERT)
        req_body_key_to_db_key = {
            "master_key": "master",
            "self_signing_key": "self_signing",
            "user_signing_key": "user_signing",
        }
        for req_body_key, db_key in req_body_key_to_db_key.items():
            if req_body_key in body:
                existing_key = await self.store.get_e2e_cross_signing_key(
                    user_id, db_key
                )
                if existing_key != body[req_body_key]:
                    return True
        return False
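A hedged sketch of how a caller might use this to decide whether an upload needs user-interactive auth; the helper functions here are assumed for illustration, not Synapse's real REST handler:

```python
# Hypothetical caller: only demand UIA when an existing cross-signing key
# would actually be replaced (the MSC3967 behaviour described above).
async def upload_cross_signing_keys(handler, requester, body):
    if await handler.has_different_keys(requester.user.to_string(), body):
        # At least one stored key would be overwritten: re-authenticate.
        await require_user_interactive_auth(requester, body)  # assumed helper
    await store_keys(requester.user.to_string(), body)        # assumed helper
```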
|
||||
|
||||
|
||||
def _check_cross_signing_key(
|
||||
key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None
|
||||
|
||||
@@ -148,6 +148,7 @@ class EventHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.store = hs.get_datastores().main
|
||||
self._storage_controllers = hs.get_storage_controllers()
|
||||
self._config = hs.config
|
||||
|
||||
async def get_event(
|
||||
self,
|
||||
@@ -189,7 +190,11 @@ class EventHandler:
|
||||
is_peeking = not is_user_in_room
|
||||
|
||||
filtered = await filter_events_for_client(
|
||||
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking
|
||||
self._storage_controllers,
|
||||
user.to_string(),
|
||||
[event],
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
if not filtered:
|
||||
|
||||
@@ -1001,11 +1001,11 @@ class FederationHandler:
|
||||
)
|
||||
|
||||
if include_auth_user_id:
|
||||
event_content[
|
||||
EventContentFields.AUTHORISING_USER
|
||||
] = await self._event_auth_handler.get_user_which_could_invite(
|
||||
room_id,
|
||||
state_ids,
|
||||
event_content[EventContentFields.AUTHORISING_USER] = (
|
||||
await self._event_auth_handler.get_user_which_could_invite(
|
||||
room_id,
|
||||
state_ids,
|
||||
)
|
||||
)
|
||||
|
||||
builder = self.event_builder_factory.for_room_version(
|
||||
|
||||
@@ -1367,9 +1367,9 @@ class FederationEventHandler:
|
||||
)
|
||||
|
||||
if remote_event.is_state() and remote_event.rejected_reason is None:
|
||||
state_map[
|
||||
(remote_event.type, remote_event.state_key)
|
||||
] = remote_event.event_id
|
||||
state_map[(remote_event.type, remote_event.state_key)] = (
|
||||
remote_event.event_id
|
||||
)
|
||||
|
||||
return state_map
|
||||
|
||||
@@ -1757,17 +1757,25 @@ class FederationEventHandler:
|
||||
|
||||
events_and_contexts_to_persist.append((event, context))
|
||||
|
||||
for event in sorted_auth_events:
|
||||
for i, event in enumerate(sorted_auth_events):
|
||||
await prep(event)
|
||||
|
||||
await self.persist_events_and_notify(
|
||||
room_id,
|
||||
events_and_contexts_to_persist,
|
||||
# Mark these events backfilled as they're historic events that will
|
||||
# eventually be backfilled. For example, missing events we fetch
|
||||
# during backfill should be marked as backfilled as well.
|
||||
backfilled=True,
|
||||
)
|
||||
# The above function is typically not async, and so won't yield to
|
||||
# the reactor. For large rooms let's yield to the reactor
|
||||
# occasionally to ensure we don't block other work.
|
||||
if (i + 1) % 1000 == 0:
|
||||
await self._clock.sleep(0)
|
||||
|
||||
# Also persist the new event in batches for similar reasons as above.
|
||||
for batch in batch_iter(events_and_contexts_to_persist, 1000):
|
||||
await self.persist_events_and_notify(
|
||||
room_id,
|
||||
batch,
|
||||
# Mark these events as backfilled as they're historic events that will
|
||||
# eventually be backfilled. For example, missing events we fetch
|
||||
# during backfill should be marked as backfilled as well.
|
||||
backfilled=True,
|
||||
)
|
||||
|
||||
@trace
|
||||
async def _check_event_auth(
|
||||
|
||||
@@ -221,7 +221,10 @@ class InitialSyncHandler:
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
messages = await filter_events_for_client(
|
||||
self._storage_controllers, user_id, messages
|
||||
self._storage_controllers,
|
||||
user_id,
|
||||
messages,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
@@ -380,6 +383,7 @@ class InitialSyncHandler:
|
||||
requester.user.to_string(),
|
||||
messages,
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
@@ -494,6 +498,7 @@ class InitialSyncHandler:
|
||||
requester.user.to_string(),
|
||||
messages,
|
||||
is_peeking=is_peeking,
|
||||
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
|
||||
)
|
||||
|
||||
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
|
||||
|
||||
@@ -34,6 +34,7 @@ from synapse.api.constants import (
     EventTypes,
     GuestAccess,
     HistoryVisibility,
+    JoinRules,
     Membership,
     RelationTypes,
     UserTypes,
@@ -1325,6 +1326,18 @@ class EventCreationHandler:

         self.validator.validate_new(event, self.config)
         await self._validate_event_relation(event)
+
+        if event.type == EventTypes.CallInvite:
+            room_id = event.room_id
+            room_info = await self.store.get_room_with_stats(room_id)
+            assert room_info is not None
+
+            if room_info.join_rules == JoinRules.PUBLIC:
+                raise SynapseError(
+                    403,
+                    "Call invites are not allowed in public rooms.",
+                    Codes.FORBIDDEN,
+                )
         logger.debug("Created event %s", event.event_id)

         return event, context
@@ -1654,9 +1667,9 @@ class EventCreationHandler:
             expiry_ms=60 * 60 * 1000,
         )

-        self._external_cache_joined_hosts_updates[
-            state_entry.state_group
-        ] = None
+        self._external_cache_joined_hosts_updates[state_entry.state_group] = (
+            None
+        )

     async def _validate_canonical_alias(
         self,

@@ -65,6 +65,7 @@ from synapse.http.server import finish_request
 from synapse.http.servlet import parse_string
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
+from synapse.module_api import ModuleApi
 from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
@@ -421,9 +422,19 @@ class OidcProvider:
         # from the IdP's jwks_uri, if required.
         self._jwks = RetryOnExceptionCachedCall(self._load_jwks)

-        self._user_mapping_provider = provider.user_mapping_provider_class(
-            provider.user_mapping_provider_config
+        user_mapping_provider_init_method = (
+            provider.user_mapping_provider_class.__init__
         )
+        if len(inspect.signature(user_mapping_provider_init_method).parameters) == 3:
+            self._user_mapping_provider = provider.user_mapping_provider_class(
+                provider.user_mapping_provider_config,
+                ModuleApi(hs, hs.get_auth_handler()),
+            )
+        else:
+            self._user_mapping_provider = provider.user_mapping_provider_class(
+                provider.user_mapping_provider_config,
+            )

         self._skip_verification = provider.skip_verification
         self._allow_existing_users = provider.allow_existing_users

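The hunk above stays backwards-compatible with older custom user-mapping providers by inspecting the constructor's arity: a provider written against the new interface takes `(config, module_api)`, an old one takes `(config)` only. A small sketch of that dispatch (the provider classes here are hypothetical, not Synapse's):

```python
import inspect

class LegacyProvider:
    def __init__(self, config):  # old-style: config only
        self.config = config

class ModernProvider:
    def __init__(self, config, module_api):  # new-style: also takes an API object
        self.config = config
        self.module_api = module_api

def make_provider(cls, config, module_api):
    # `self` counts as a parameter, so a (config, module_api) __init__ has 3.
    if len(inspect.signature(cls.__init__).parameters) == 3:
        return cls(config, module_api)
    return cls(config)

print(type(make_provider(LegacyProvider, {}, object())).__name__)  # LegacyProvider
print(type(make_provider(ModernProvider, {}, object())).__name__)  # ModernProvider
```
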
@@ -442,6 +453,10 @@ class OidcProvider:
         # optional brand identifier for this auth provider
         self.idp_brand = provider.idp_brand

+        self.additional_authorization_parameters = (
+            provider.additional_authorization_parameters
+        )
+
         self._sso_handler = hs.get_sso_handler()
         self._device_handler = hs.get_device_handler()

@@ -818,14 +833,38 @@ class OidcProvider:
         logger.debug("Using the OAuth2 access_token to request userinfo")
         metadata = await self.load_metadata()

-        resp = await self._http_client.get_json(
+        resp = await self._http_client.request(
+            "GET",
             metadata["userinfo_endpoint"],
-            headers={"Authorization": ["Bearer {}".format(token["access_token"])]},
+            headers=Headers(
+                {"Authorization": ["Bearer {}".format(token["access_token"])]}
+            ),
         )

-        logger.debug("Retrieved user info from userinfo endpoint: %r", resp)
+        body = await readBody(resp)

-        return UserInfo(resp)
+        content_type_headers = resp.headers.getRawHeaders("Content-Type")
+        assert content_type_headers
+        # We use `startswith` because the header value can contain the `charset` parameter
+        # even if it is useless, and Twisted doesn't take care of that for us.
+        if content_type_headers[0].startswith("application/jwt"):
+            alg_values = metadata.get(
+                "id_token_signing_alg_values_supported", ["RS256"]
+            )
+            jwt = JsonWebToken(alg_values)
+            jwk_set = await self.load_jwks()
+            try:
+                decoded_resp = jwt.decode(body, key=jwk_set)
+            except ValueError:
+                logger.info("Reloading JWKS after decode error")
+                jwk_set = await self.load_jwks(force=True)  # try reloading the jwks
+                decoded_resp = jwt.decode(body, key=jwk_set)
+        else:
+            decoded_resp = json_decoder.decode(body.decode("utf-8"))
+
+        logger.debug("Retrieved user info from userinfo endpoint: %r", decoded_resp)
+
+        return UserInfo(decoded_resp)

     async def _verify_jwt(
         self,

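Some IdPs return the userinfo response as a signed JWT (`Content-Type: application/jwt`) rather than plain JSON, which is what the hunk above handles. A minimal sketch of the same content-type dispatch, using authlib's `JsonWebToken` as Synapse does (`jwk_set` handling and the retry-on-stale-keys logic are simplified away):

```python
import json
from authlib.jose import JsonWebToken

def parse_userinfo(body: bytes, content_type: str, jwk_set, algs=("RS256",)):
    # Compare with startswith: the header may carry a ";charset=..." suffix.
    if content_type.startswith("application/jwt"):
        jwt = JsonWebToken(list(algs))
        return jwt.decode(body, key=jwk_set)   # signed userinfo response
    return json.loads(body.decode("utf-8"))    # plain JSON userinfo
```
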
@@ -971,17 +1010,21 @@ class OidcProvider:

         metadata = await self.load_metadata()

+        additional_authorization_parameters = dict(
+            self.additional_authorization_parameters
+        )
         # Automatically enable PKCE if it is supported.
-        extra_grant_values = {}
         if metadata.get("code_challenge_methods_supported"):
             code_verifier = generate_token(48)

             # Note that we verified the server supports S256 earlier (in
             # OidcProvider._validate_metadata).
-            extra_grant_values = {
-                "code_challenge_method": "S256",
-                "code_challenge": create_s256_code_challenge(code_verifier),
-            }
+            additional_authorization_parameters.update(
+                {
+                    "code_challenge_method": "S256",
+                    "code_challenge": create_s256_code_challenge(code_verifier),
+                }
+            )

         cookie = self._macaroon_generaton.generate_oidc_session_token(
             state=state,
@@ -1020,7 +1063,7 @@ class OidcProvider:
             scope=self._scopes,
             state=state,
             nonce=nonce,
-            **extra_grant_values,
+            **additional_authorization_parameters,
         )

     async def handle_oidc_callback(

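For context on the PKCE parameters being merged above: the client generates a random `code_verifier`, sends only its S256 challenge in the authorization request, and reveals the verifier later in the token request. A short sketch using the same authlib helpers Synapse relies on:

```python
from authlib.common.security import generate_token
from authlib.oauth2.rfc7636 import create_s256_code_challenge

authorization_parameters = {"scope": "openid"}

code_verifier = generate_token(48)  # kept secret client-side until the token request
authorization_parameters.update(
    {
        "code_challenge_method": "S256",
        # SHA-256 hash of the verifier, base64url-encoded; the IdP stores this
        # and later checks the verifier sent with the token request against it.
        "code_challenge": create_s256_code_challenge(code_verifier),
    }
)
print(authorization_parameters["code_challenge"])
```
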
@@ -1583,7 +1626,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
     This is the default mapping provider.
     """

-    def __init__(self, config: JinjaOidcMappingConfig):
+    def __init__(self, config: JinjaOidcMappingConfig, module_api: ModuleApi):
         self._config = config

     @staticmethod

@@ -623,6 +623,7 @@ class PaginationHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         # if after the filter applied there are no more events

@@ -493,9 +493,9 @@ class WorkerPresenceHandler(BasePresenceHandler):

         # The number of ongoing syncs on this process, by (user ID, device ID).
         # Empty if _presence_enabled is false.
-        self._user_device_to_num_current_syncs: Dict[
-            Tuple[str, Optional[str]], int
-        ] = {}
+        self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
+            {}
+        )

         self.notifier = hs.get_notifier()
         self.instance_id = hs.get_instance_id()
@@ -818,9 +818,9 @@ class PresenceHandler(BasePresenceHandler):

         # Keeps track of the number of *ongoing* syncs on this process. While
         # this is non zero a user will never go offline.
-        self._user_device_to_num_current_syncs: Dict[
-            Tuple[str, Optional[str]], int
-        ] = {}
+        self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = (
+            {}
+        )

         # Keeps track of the number of *ongoing* syncs on other processes.
         #

@@ -320,9 +320,9 @@ class ProfileHandler:
             server_name = host

         if self._is_mine_server_name(server_name):
-            media_info: Optional[
-                Union[LocalMedia, RemoteMedia]
-            ] = await self.store.get_local_media(media_id)
+            media_info: Optional[Union[LocalMedia, RemoteMedia]] = (
+                await self.store.get_local_media(media_id)
+            )
         else:
             media_info = await self.store.get_cached_remote_media(server_name, media_id)

@@ -55,12 +55,12 @@ class ReadMarkerHandler:

         should_update = True
         # Get event ordering, this also ensures we know about the event
-        event_ordering = await self.store.get_event_ordering(event_id)
+        event_ordering = await self.store.get_event_ordering(event_id, room_id)

         if existing_read_marker:
             try:
                 old_event_ordering = await self.store.get_event_ordering(
-                    existing_read_marker["event_id"]
+                    existing_read_marker["event_id"], room_id
                 )
             except SynapseError:
                 # Old event no longer exists, assume new is ahead. This may

@@ -95,6 +95,7 @@ class RelationsHandler:
         self._event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
         self._event_creation_handler = hs.get_event_creation_handler()
+        self._config = hs.config

     async def get_relations(
         self,
@@ -163,6 +164,7 @@ class RelationsHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
+            msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
         )

         # The relations returned for the requested event do include their
@@ -188,13 +190,13 @@ class RelationsHandler:
         if include_original_event:
             # Do not bundle aggregations when retrieving the original event because
             # we want the content before relations are applied to it.
-            return_value[
-                "original_event"
-            ] = await self._event_serializer.serialize_event(
-                event,
-                now,
-                bundle_aggregations=None,
-                config=serialize_options,
+            return_value["original_event"] = (
+                await self._event_serializer.serialize_event(
+                    event,
+                    now,
+                    bundle_aggregations=None,
+                    config=serialize_options,
+                )
             )

         if next_token:
@@ -608,6 +610,7 @@ class RelationsHandler:
             user_id,
             events,
             is_peeking=(member_event_id is None),
+            msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
         )

         aggregations = await self.get_bundled_aggregations(

@@ -151,7 +151,7 @@ class RoomCreationHandler:
             "history_visibility": HistoryVisibility.SHARED,
             "original_invitees_have_ops": False,
             "guest_can_join": False,
-            "power_level_content_override": {},
+            "power_level_content_override": {EventTypes.CallInvite: 50},
         },
     }

@@ -538,10 +538,10 @@ class RoomCreationHandler:
         # deep-copy the power-levels event before we start modifying it
         # note that if frozen_dicts are enabled, `power_levels` will be a frozen
         # dict so we can't just copy.deepcopy it.
-        initial_state[
-            (EventTypes.PowerLevels, "")
-        ] = power_levels = copy_and_fixup_power_levels_contents(
-            initial_state[(EventTypes.PowerLevels, "")]
+        initial_state[(EventTypes.PowerLevels, "")] = power_levels = (
+            copy_and_fixup_power_levels_contents(
+                initial_state[(EventTypes.PowerLevels, "")]
+            )
         )

         # Resolve the minimum power level required to send any state event
@@ -956,6 +956,7 @@ class RoomCreationHandler:
             room_alias=room_alias,
             power_level_content_override=power_level_content_override,
             creator_join_profile=creator_join_profile,
+            ignore_forced_encryption=ignore_forced_encryption,
         )

         # we avoid dropping the lock between invites, as otherwise joins can
@@ -1362,9 +1363,11 @@ class RoomCreationHandler:
         visibility = room_config.get("visibility", "private")
         preset_name = room_config.get(
             "preset",
-            RoomCreationPreset.PRIVATE_CHAT
-            if visibility == "private"
-            else RoomCreationPreset.PUBLIC_CHAT,
+            (
+                RoomCreationPreset.PRIVATE_CHAT
+                if visibility == "private"
+                else RoomCreationPreset.PUBLIC_CHAT
+            ),
         )
         try:
             preset_config = self._presets_dict[preset_name]
@@ -1473,6 +1476,7 @@ class RoomContextHandler:
             user.to_string(),
             events,
             is_peeking=is_peeking,
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         event = await self.store.get_event(

@@ -51,6 +51,7 @@ from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.replication.http.push import ReplicationCopyPusherRestServlet
 from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.types import (
     JsonDict,
@@ -181,6 +182,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             hs.config.server.forgotten_room_retention_period
         )

+        self._is_push_writer = (
+            hs.get_instance_name() in hs.config.worker.writers.push_rules
+        )
+        self._push_writer = hs.config.worker.writers.push_rules[0]
+        self._copy_push_client = ReplicationCopyPusherRestServlet.make_client(hs)
+
     def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
         """Notify the rate limiter that a room join has occurred.

@@ -745,6 +752,36 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             and requester.user.to_string() == self._server_notices_mxid
         )

+        requester_suspended = await self.store.get_user_suspended_status(
+            requester.user.to_string()
+        )
+        if action == Membership.INVITE and requester_suspended:
+            raise SynapseError(
+                403,
+                "Sending invites while account is suspended is not allowed.",
+                Codes.USER_ACCOUNT_SUSPENDED,
+            )
+
+        if target.to_string() != requester.user.to_string():
+            target_suspended = await self.store.get_user_suspended_status(
+                target.to_string()
+            )
+        else:
+            target_suspended = requester_suspended
+
+        if action == Membership.JOIN and target_suspended:
+            raise SynapseError(
+                403,
+                "Joining rooms while account is suspended is not allowed.",
+                Codes.USER_ACCOUNT_SUSPENDED,
+            )
+        if action == Membership.KNOCK and target_suspended:
+            raise SynapseError(
+                403,
+                "Knocking on rooms while account is suspended is not allowed.",
+                Codes.USER_ACCOUNT_SUSPENDED,
+            )
+
         if (
             not self.allow_per_room_profiles and not is_requester_server_notices_user
         ) or requester.shadow_banned:

@@ -1236,11 +1273,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             # If this is going to be a local join, additional information must
             # be included in the event content in order to efficiently validate
             # the event.
-            content[
-                EventContentFields.AUTHORISING_USER
-            ] = await self.event_auth_handler.get_user_which_could_invite(
-                room_id,
-                state_before_join,
+            content[EventContentFields.AUTHORISING_USER] = (
+                await self.event_auth_handler.get_user_which_could_invite(
+                    room_id,
+                    state_before_join,
+                )
             )

         return False, []

@@ -1301,9 +1338,17 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 old_room_id, new_room_id, user_id
             )
             # Copy over push rules
-            await self.store.copy_push_rules_from_room_to_room_for_user(
-                old_room_id, new_room_id, user_id
-            )
+            if self._is_push_writer:
+                await self.store.copy_push_rules_from_room_to_room_for_user(
+                    old_room_id, new_room_id, user_id
+                )
+            else:
+                await self._copy_push_client(
+                    instance_name=self._push_writer,
+                    user_id=user_id,
+                    old_room_id=old_room_id,
+                    new_room_id=new_room_id,
+                )
         except Exception:
             logger.exception(
                 "Error copying tags and/or push rules from rooms %s to %s for user %s. "

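The hunk above applies Synapse's "single writer" worker pattern: only the designated push-rules writer touches the database, and every other worker forwards the request to that writer over a replication endpoint. A rough sketch of the shape of that pattern, with hypothetical names rather than Synapse's actual classes:

```python
from dataclasses import dataclass
from typing import Awaitable, Callable

@dataclass
class PushRuleCopier:
    is_push_writer: bool
    push_writer: str  # instance name of the designated writer worker
    write_locally: Callable[[str, str, str], Awaitable[None]]
    replicate: Callable[..., Awaitable[None]]

    async def copy(self, user_id: str, old_room_id: str, new_room_id: str) -> None:
        if self.is_push_writer:
            # We are the writer: safe to write to the database directly.
            await self.write_locally(old_room_id, new_room_id, user_id)
        else:
            # Not the writer: forward over replication HTTP to the writer.
            await self.replicate(
                instance_name=self.push_writer,
                user_id=user_id,
                old_room_id=old_room_id,
                new_room_id=new_room_id,
            )
```
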
@@ -480,7 +480,10 @@ class SearchHandler:
         filtered_events = await search_filter.filter([r["event"] for r in results])

         events = await filter_events_for_client(
-            self._storage_controllers, user.to_string(), filtered_events
+            self._storage_controllers,
+            user.to_string(),
+            filtered_events,
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         events.sort(key=lambda e: -rank_map[e.event_id])
@@ -579,7 +582,10 @@ class SearchHandler:
         filtered_events = await search_filter.filter([r["event"] for r in results])

         events = await filter_events_for_client(
-            self._storage_controllers, user.to_string(), filtered_events
+            self._storage_controllers,
+            user.to_string(),
+            filtered_events,
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         room_events.extend(events)
@@ -664,11 +670,17 @@ class SearchHandler:
         )

         events_before = await filter_events_for_client(
-            self._storage_controllers, user.to_string(), res.events_before
+            self._storage_controllers,
+            user.to_string(),
+            res.events_before,
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         events_after = await filter_events_for_client(
-            self._storage_controllers, user.to_string(), res.events_after
+            self._storage_controllers,
+            user.to_string(),
+            res.events_after,
+            msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
         )

         context: JsonDict = {

@@ -150,7 +150,7 @@ class UserAttributes:
     display_name: Optional[str] = None
     picture: Optional[str] = None
-    # mypy thinks these are incompatible for some reason.
-    emails: StrCollection = attr.Factory(list)  # type: ignore[assignment]
+    emails: StrCollection = attr.Factory(list)


 @attr.s(slots=True, auto_attribs=True)
@@ -169,6 +169,7 @@ class UsernameMappingSession:
     # attributes returned by the ID mapper
     display_name: Optional[str]
     emails: StrCollection
+    avatar_url: Optional[str]

     # An optional dictionary of extra attributes to be provided to the client in the
     # login response.
@@ -183,6 +184,7 @@ class UsernameMappingSession:
     # choices made by the user
     chosen_localpart: Optional[str] = None
     use_display_name: bool = True
+    use_avatar: bool = True
     emails_to_use: StrCollection = ()
     terms_accepted_version: Optional[str] = None

@@ -660,6 +662,9 @@ class SsoHandler:
             remote_user_id=remote_user_id,
             display_name=attributes.display_name,
             emails=attributes.emails,
+            avatar_url=attributes.picture,
+            # Default to using all mapped emails. Will be overwritten in handle_submit_username_request.
+            emails_to_use=attributes.emails,
             client_redirect_url=client_redirect_url,
             expiry_time_ms=now + self._MAPPING_SESSION_VALIDITY_PERIOD_MS,
             extra_login_attributes=extra_login_attributes,
@@ -966,6 +971,7 @@ class SsoHandler:
         session_id: str,
         localpart: str,
         use_display_name: bool,
+        use_avatar: bool,
         emails_to_use: Iterable[str],
     ) -> None:
         """Handle a request to the username-picker 'submit' endpoint
@@ -988,6 +994,7 @@ class SsoHandler:
         # update the session with the user's choices
         session.chosen_localpart = localpart
         session.use_display_name = use_display_name
+        session.use_avatar = use_avatar

         emails_from_idp = set(session.emails)
         filtered_emails: Set[str] = set()
@@ -1068,6 +1075,9 @@ class SsoHandler:
         if session.use_display_name:
             attributes.display_name = session.display_name

+        if session.use_avatar:
+            attributes.picture = session.avatar_url
+
         # the following will raise a 400 error if the username has been taken in the
         # meantime.
         user_id = await self._register_mapped_user(

@@ -41,6 +41,7 @@ from synapse.api.constants import (
     AccountDataTypes,
     EventContentFields,
     EventTypes,
+    JoinRules,
     Membership,
 )
 from synapse.api.filtering import FilterCollection
@@ -595,6 +596,7 @@ class SyncHandler:
                 sync_config.user.to_string(),
                 recents,
                 always_include_ids=current_state_ids,
+                msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
             )
             log_kv({"recents_after_visibility_filtering": len(recents)})
         else:
@@ -675,13 +677,23 @@ class SyncHandler:
                 )
             )

-            loaded_recents = await filter_events_for_client(
+            filtered_recents = await filter_events_for_client(
                 self._storage_controllers,
                 sync_config.user.to_string(),
                 loaded_recents,
                 always_include_ids=current_state_ids,
+                msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
             )

+            loaded_recents = []
+            for event in filtered_recents:
+                if event.type == EventTypes.CallInvite:
+                    room_info = await self.store.get_room_with_stats(event.room_id)
+                    assert room_info is not None
+                    if room_info.join_rules == JoinRules.PUBLIC:
+                        continue
+                loaded_recents.append(event)
+
             log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})

             loaded_recents.extend(recents)

@@ -943,7 +955,7 @@ class SyncHandler:
         batch: TimelineBatch,
         sync_config: SyncConfig,
         since_token: Optional[StreamToken],
-        now_token: StreamToken,
+        end_token: StreamToken,
         full_state: bool,
     ) -> MutableStateMap[EventBase]:
         """Works out the difference in state between the end of the previous sync and
@@ -954,7 +966,9 @@ class SyncHandler:
             batch: The timeline batch for the room that will be sent to the user.
             sync_config:
             since_token: Token of the end of the previous batch. May be `None`.
-            now_token: Token of the end of the current batch.
+            end_token: Token of the end of the current batch. Normally this will be
+                the same as the global "now_token", but if the user has left the room,
+                the point just after their leave event.
             full_state: Whether to force returning the full state.
                 `lazy_load_members` still applies when `full_state` is `True`.
@@ -1014,30 +1028,6 @@ class SyncHandler:
                 if event.is_state():
                     timeline_state[(event.type, event.state_key)] = event.event_id

-            if full_state:
-                # always make sure we LL ourselves so we know we're in the room
-                # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
-                # We only need apply this on full state syncs given we disabled
-                # LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840.
-                # We don't insert ourselves into `members_to_fetch`, because in some
-                # rare cases (an empty event batch with a now_token after the user's
-                # leave in a partial state room which another local user has
-                # joined), the room state will be missing our membership and there
-                # is no guarantee that our membership will be in the auth events of
-                # timeline events when the room is partial stated.
-                state_filter = StateFilter.from_lazy_load_member_list(
-                    members_to_fetch.union((sync_config.user.to_string(),))
-                )
-            else:
-                state_filter = StateFilter.from_lazy_load_member_list(
-                    members_to_fetch
-                )
-
-            # We are happy to use partial state to compute the `/sync` response.
-            # Since partial state may not include the lazy-loaded memberships we
-            # require, we fix up the state response afterwards with memberships from
-            # auth events.
-            await_full_state = False
         else:
             timeline_state = {
                 (event.type, event.state_key): event.event_id
@@ -1045,9 +1035,6 @@ class SyncHandler:
                 if event.is_state()
             }

-            state_filter = StateFilter.all()
-            await_full_state = True
-
         # Now calculate the state to return in the sync response for the room.
         # This is more or less the change in state between the end of the previous
         # sync's timeline and the start of the current sync's timeline.
@@ -1057,132 +1044,29 @@ class SyncHandler:
         # whether the room is partial stated *before* fetching it.
         is_partial_state_room = await self.store.is_partial_state_room(room_id)
         if full_state:
-            if batch:
-                state_at_timeline_end = (
-                    await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[-1].event_id,
-                        state_filter=state_filter,
-                        await_full_state=await_full_state,
-                    )
-                )
-
-                state_at_timeline_start = (
-                    await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[0].event_id,
-                        state_filter=state_filter,
-                        await_full_state=await_full_state,
-                    )
-                )
-
-            else:
-                state_at_timeline_end = await self.get_state_at(
-                    room_id,
-                    stream_position=now_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
-                )
-
-                state_at_timeline_start = state_at_timeline_end
-
-            state_ids = _calculate_state(
-                timeline_contains=timeline_state,
-                timeline_start=state_at_timeline_start,
-                timeline_end=state_at_timeline_end,
-                previous_timeline_end={},
-                lazy_load_members=lazy_load_members,
+            state_ids = await self._compute_state_delta_for_full_sync(
+                room_id,
+                sync_config.user,
+                batch,
+                end_token,
+                members_to_fetch,
+                timeline_state,
             )
-        elif batch.limited:
-            if batch:
-                state_at_timeline_start = (
-                    await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[0].event_id,
-                        state_filter=state_filter,
-                        await_full_state=await_full_state,
-                    )
-                )
-            else:
-                # We can get here if the user has ignored the senders of all
-                # the recent events.
-                state_at_timeline_start = await self.get_state_at(
-                    room_id,
-                    stream_position=now_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
-                )
-
-            # for now, we disable LL for gappy syncs - see
-            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
-            # N.B. this slows down incr syncs as we are now processing way
-            # more state in the server than if we were LLing.
-            #
-            # We still have to filter timeline_start to LL entries (above) in order
-            # for _calculate_state's LL logic to work, as we have to include LL
-            # members for timeline senders in case they weren't loaded in the initial
-            # sync. We do this (counterintuitively) by filtering timeline_start
-            # members to just be ones which were timeline senders, which then ensures
-            # all of the rest get included in the state block (if we need to know
-            # about them).
-            state_filter = StateFilter.all()
-
+        else:
             # If this is an initial sync then full_state should be set, and
             # that case is handled above. We assert here to ensure that this
             # is indeed the case.
             assert since_token is not None
-            state_at_previous_sync = await self.get_state_at(
+
+            state_ids = await self._compute_state_delta_for_incremental_sync(
                 room_id,
-                stream_position=since_token,
-                state_filter=state_filter,
-                await_full_state=await_full_state,
+                batch,
+                since_token,
+                end_token,
+                members_to_fetch,
+                timeline_state,
             )
-
-            if batch:
-                state_at_timeline_end = (
-                    await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[-1].event_id,
-                        state_filter=state_filter,
-                        await_full_state=await_full_state,
-                    )
-                )
-            else:
-                # We can get here if the user has ignored the senders of all
-                # the recent events.
-                state_at_timeline_end = await self.get_state_at(
-                    room_id,
-                    stream_position=now_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
-                )
-
-            state_ids = _calculate_state(
-                timeline_contains=timeline_state,
-                timeline_start=state_at_timeline_start,
-                timeline_end=state_at_timeline_end,
-                previous_timeline_end=state_at_previous_sync,
-                # we have to include LL members in case LL initial sync missed them
-                lazy_load_members=lazy_load_members,
-            )
-        else:
-            state_ids = {}
-            if lazy_load_members:
-                if members_to_fetch and batch.events:
-                    # We're returning an incremental sync, with no
-                    # "gap" since the previous sync, so normally there would be
-                    # no state to return.
-                    # But we're lazy-loading, so the client might need some more
-                    # member events to understand the events in this timeline.
-                    # So we fish out all the member events corresponding to the
-                    # timeline here, and then dedupe any redundant ones below.
-
-                    state_ids = await self._state_storage_controller.get_state_ids_for_event(
-                        batch.events[0].event_id,
-                        # we only want members!
-                        state_filter=StateFilter.from_types(
-                            (EventTypes.Member, member)
-                            for member in members_to_fetch
-                        ),
-                        await_full_state=False,
-                    )
-

         # If we only have partial state for the room, `state_ids` may be missing the
         # memberships we wanted. We attempt to find some by digging through the auth
         # events of timeline events.

@@ -1245,6 +1129,240 @@ class SyncHandler:
             if e.type != EventTypes.Aliases  # until MSC2261 or alternative solution
         }

+    async def _compute_state_delta_for_full_sync(
+        self,
+        room_id: str,
+        syncing_user: UserID,
+        batch: TimelineBatch,
+        end_token: StreamToken,
+        members_to_fetch: Optional[Set[str]],
+        timeline_state: StateMap[str],
+    ) -> StateMap[str]:
+        """Calculate the state events to be included in a full sync response.
+
+        As with `_compute_state_delta_for_incremental_sync`, the result will include
+        the membership events for the senders of each event in `members_to_fetch`.
+
+        Args:
+            room_id: The room we are calculating for.
+            syncing_user: The user that is calling `/sync`.
+            batch: The timeline batch for the room that will be sent to the user.
+            end_token: Token of the end of the current batch. Normally this will be
+                the same as the global "now_token", but if the user has left the room,
+                the point just after their leave event.
+            members_to_fetch: If lazy-loading is enabled, the memberships needed for
+                events in the timeline.
+            timeline_state: The contribution to the room state from state events in
+                `batch`. Only contains the last event for any given state key.
+
+        Returns:
+            A map from (type, state_key) to event_id, for each event that we believe
+            should be included in the `state` part of the sync response.
+        """
+        if members_to_fetch is not None:
+            # Lazy-loading of membership events is enabled.
+            #
+            # Always make sure we load our own membership event so we know if
+            # we're in the room, to fix https://github.com/vector-im/riot-web/issues/7209.
+            #
+            # We only need apply this on full state syncs given we disabled
+            # LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840.
+            #
+            # We don't insert ourselves into `members_to_fetch`, because in some
+            # rare cases (an empty event batch with a now_token after the user's
+            # leave in a partial state room which another local user has
+            # joined), the room state will be missing our membership and there
+            # is no guarantee that our membership will be in the auth events of
+            # timeline events when the room is partial stated.
+            state_filter = StateFilter.from_lazy_load_member_list(
+                members_to_fetch.union((syncing_user.to_string(),))
+            )
+
+            # We are happy to use partial state to compute the `/sync` response.
+            # Since partial state may not include the lazy-loaded memberships we
+            # require, we fix up the state response afterwards with memberships from
+            # auth events.
+            await_full_state = False
+            lazy_load_members = True
+        else:
+            state_filter = StateFilter.all()
+            await_full_state = True
+            lazy_load_members = False
+
+        state_at_timeline_end = await self.get_state_at(
+            room_id,
+            stream_position=end_token,
+            state_filter=state_filter,
+            await_full_state=await_full_state,
+        )
+
+        if batch:
+            # Strictly speaking, this returns the state *after* the first event in the
+            # timeline, but that is good enough here.
+            state_at_timeline_start = (
+                await self._state_storage_controller.get_state_ids_for_event(
+                    batch.events[0].event_id,
+                    state_filter=state_filter,
+                    await_full_state=await_full_state,
+                )
+            )
+        else:
+            state_at_timeline_start = state_at_timeline_end
+
+        state_ids = _calculate_state(
+            timeline_contains=timeline_state,
+            timeline_start=state_at_timeline_start,
+            timeline_end=state_at_timeline_end,
+            previous_timeline_end={},
+            lazy_load_members=lazy_load_members,
+        )
+        return state_ids
+
+    async def _compute_state_delta_for_incremental_sync(
+        self,
+        room_id: str,
+        batch: TimelineBatch,
+        since_token: StreamToken,
+        end_token: StreamToken,
+        members_to_fetch: Optional[Set[str]],
+        timeline_state: StateMap[str],
+    ) -> StateMap[str]:
+        """Calculate the state events to be included in an incremental sync response.
+
+        If lazy-loading of membership events is enabled (as indicated by
+        `members_to_fetch` being not-`None`), the result will include the membership
+        events for each member in `members_to_fetch`. The caller
+        (`compute_state_delta`) is responsible for keeping track of which membership
+        events we have already sent to the client, and hence ripping them out.
+
+        Args:
+            room_id: The room we are calculating for.
+            batch: The timeline batch for the room that will be sent to the user.
+            since_token: Token of the end of the previous batch.
+            end_token: Token of the end of the current batch. Normally this will be
+                the same as the global "now_token", but if the user has left the room,
+                the point just after their leave event.
+            members_to_fetch: If lazy-loading is enabled, the memberships needed for
+                events in the timeline. Otherwise, `None`.
+            timeline_state: The contribution to the room state from state events in
+                `batch`. Only contains the last event for any given state key.
+
+        Returns:
+            A map from (type, state_key) to event_id, for each event that we believe
+            should be included in the `state` part of the sync response.
+        """
+        if members_to_fetch is not None:
+            # Lazy-loading is enabled. Only return the state that is needed.
+            state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+            await_full_state = False
+            lazy_load_members = True
+        else:
+            state_filter = StateFilter.all()
+            await_full_state = True
+            lazy_load_members = False
+
+        # For a non-gappy sync if the events in the timeline are simply a linear
+        # chain (i.e. no merging/branching of the graph), then we know the state
+        # delta between the end of the previous sync and start of the new one is
+        # empty.
+        #
+        # c.f. #16941 for an example of why we can't do this for all non-gappy
+        # syncs.
+        is_linear_timeline = True
+        if batch.events:
+            # We need to make sure the first event in our batch points to the
+            # last event in the previous batch.
+            last_event_id_prev_batch = (
+                await self.store.get_last_event_in_room_before_stream_ordering(
+                    room_id,
+                    end_token=since_token.room_key,
+                )
+            )
+
+            prev_event_id = last_event_id_prev_batch
+            for e in batch.events:
+                if e.prev_event_ids() != [prev_event_id]:
+                    is_linear_timeline = False
+                    break
+                prev_event_id = e.event_id
+
+        if is_linear_timeline and not batch.limited:
+            state_ids: StateMap[str] = {}
+            if lazy_load_members:
+                if members_to_fetch and batch.events:
+                    # We're lazy-loading, so the client might need some more
+                    # member events to understand the events in this timeline.
+                    # So we fish out all the member events corresponding to the
+                    # timeline here. The caller will then dedupe any redundant
+                    # ones.
+
+                    state_ids = await self._state_storage_controller.get_state_ids_for_event(
+                        batch.events[0].event_id,
+                        # we only want members!
+                        state_filter=StateFilter.from_types(
+                            (EventTypes.Member, member) for member in members_to_fetch
+                        ),
+                        await_full_state=False,
+                    )
+            return state_ids
+
+        if batch:
+            state_at_timeline_start = (
+                await self._state_storage_controller.get_state_ids_for_event(
+                    batch.events[0].event_id,
+                    state_filter=state_filter,
+                    await_full_state=await_full_state,
+                )
+            )
+        else:
+            # We can get here if the user has ignored the senders of all
+            # the recent events.
+            state_at_timeline_start = await self.get_state_at(
+                room_id,
+                stream_position=end_token,
+                state_filter=state_filter,
+                await_full_state=await_full_state,
+            )
+
+        if batch.limited:
+            # for now, we disable LL for gappy syncs - see
+            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
+            # N.B. this slows down incr syncs as we are now processing way
+            # more state in the server than if we were LLing.
+            #
+            # We still have to filter timeline_start to LL entries (above) in order
+            # for _calculate_state's LL logic to work, as we have to include LL
+            # members for timeline senders in case they weren't loaded in the initial
+            # sync. We do this (counterintuitively) by filtering timeline_start
+            # members to just be ones which were timeline senders, which then ensures
+            # all of the rest get included in the state block (if we need to know
+            # about them).
+            state_filter = StateFilter.all()
+
+        state_at_previous_sync = await self.get_state_at(
+            room_id,
+            stream_position=since_token,
+            state_filter=state_filter,
+            await_full_state=await_full_state,
+        )
+
+        state_at_timeline_end = await self.get_state_at(
+            room_id,
+            stream_position=end_token,
+            state_filter=state_filter,
+            await_full_state=await_full_state,
+        )
+
+        state_ids = _calculate_state(
+            timeline_contains=timeline_state,
+            timeline_start=state_at_timeline_start,
+            timeline_end=state_at_timeline_end,
+            previous_timeline_end=state_at_previous_sync,
+            lazy_load_members=lazy_load_members,
+        )
+
+        return state_ids
+
     async def _find_missing_partial_state_memberships(
         self,
         room_id: str,

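The linearity shortcut added above relies on a simple DAG property: if each timeline event's `prev_events` is exactly the single event before it, there can be no state resolution between the previous sync and this one, so the state delta is empty. A toy illustration of that check, with a hypothetical `Event` type rather than Synapse's `EventBase`:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Event:
    event_id: str
    prev_event_ids: List[str]

def is_linear(events: List[Event], last_event_id_prev_batch: Optional[str]) -> bool:
    prev_event_id = last_event_id_prev_batch
    for e in events:
        if e.prev_event_ids != [prev_event_id]:
            return False  # a fork or gap in the DAG
        prev_event_id = e.event_id
    return True

chain = [Event("$B", ["$A"]), Event("$C", ["$B"])]
assert is_linear(chain, "$A")
assert not is_linear(chain, "$X")  # doesn't connect to the previous batch
```
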
@@ -1333,9 +1451,9 @@ class SyncHandler:
                         and auth_event.state_key == member
                     ):
                         missing_members.discard(member)
-                        additional_state_ids[
-                            (EventTypes.Member, member)
-                        ] = auth_event.event_id
+                        additional_state_ids[(EventTypes.Member, member)] = (
+                            auth_event.event_id
+                        )
                         break

         if missing_members:
@@ -2243,6 +2361,7 @@ class SyncHandler:
                     full_state=False,
                     since_token=since_token,
                     upto_token=leave_token,
+                    end_token=leave_token,
                     out_of_band=leave_event.internal_metadata.is_out_of_band_membership(),
                 )
             )
@@ -2280,6 +2399,7 @@ class SyncHandler:
                     full_state=False,
                     since_token=None if newly_joined else since_token,
                     upto_token=prev_batch_token,
+                    end_token=now_token,
                 )
             else:
                 entry = RoomSyncResultBuilder(
@@ -2290,6 +2410,7 @@ class SyncHandler:
                     full_state=False,
                     since_token=since_token,
                     upto_token=since_token,
+                    end_token=now_token,
                 )

             room_entries.append(entry)
@@ -2348,6 +2469,7 @@ class SyncHandler:
                         full_state=True,
                         since_token=since_token,
                         upto_token=now_token,
+                        end_token=now_token,
                     )
                 )
             elif event.membership == Membership.INVITE:
@@ -2377,6 +2499,7 @@ class SyncHandler:
                     full_state=True,
                     since_token=since_token,
                     upto_token=leave_token,
+                    end_token=leave_token,
                 )
             )

@@ -2447,6 +2570,7 @@ class SyncHandler:
             {
                 "since_token": since_token,
                 "upto_token": upto_token,
+                "end_token": room_builder.end_token,
             }
         )

@@ -2520,7 +2644,7 @@ class SyncHandler:
                 batch,
                 sync_config,
                 since_token,
-                now_token,
+                room_builder.end_token,
                 full_state=full_state,
             )
         else:

@@ -2680,6 +2804,61 @@ def _calculate_state(
         e for t, e in timeline_start.items() if t[0] == EventTypes.Member
     )

+    # Naively, we would just return the difference between the state at the start
+    # of the timeline (`timeline_start_ids`) and that at the end of the previous sync
+    # (`previous_timeline_end_ids`). However, that fails in the presence of forks in
+    # the DAG.
+    #
+    # For example, consider a DAG such as the following:
+    #
+    #       E1
+    #     ↗    ↖
+    #    |      S2
+    #    |      ↑
+    # ---|------|----
+    #    |      |
+    #    E3     |
+    #     ↖    /
+    #       E4
+    #
+    # ... and a filter that means we only return 2 events, represented by the dashed
+    # horizontal line. Assuming S2 was *not* included in the previous sync, we need to
+    # include it in the `state` section.
+    #
+    # Note that the state at the start of the timeline (E3) does not include S2. So,
+    # to make sure it gets included in the calculation here, we actually look at
+    # the state at the *end* of the timeline, and subtract any events that are present
+    # in the timeline.
+    #
+    # ----------
+    #
+    # Aside 1: You may then wonder if we need to include `timeline_start` in the
+    # calculation. Consider a linear DAG:
+    #
+    #      E1
+    #      ↑
+    #      S2
+    #      ↑
+    # ----|------
+    #      |
+    #      E3
+    #      ↑
+    #      S4
+    #      ↑
+    #      E5
+    #
+    # ... where S2 and S4 change the same piece of state; and where we have a filter
+    # that returns 3 events (E3, S4, E5). We still need to tell the client about S2,
+    # because it might affect the display of E3. However, the state at the end of the
+    # timeline only tells us about S4; if we don't inspect `timeline_start` we won't
+    # find out about S2.
+    #
+    # (There are yet more complicated cases in which a state event is excluded from the
+    # timeline, but whose effect actually lands in the DAG in the *middle* of the
+    # timeline. We have no way to represent that in the /sync response, and we don't
+    # even try; it is either omitted or plonked into `state` as if it were at the start
+    # of the timeline, depending on what else is in the timeline.)
+
     state_ids = (
         (timeline_end_ids | timeline_start_ids)
         - previous_timeline_end_ids

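The long comment above boils down to set arithmetic over `(type, state_key) -> event_id` maps: take the state at both the start and the end of the timeline, drop whatever the client already had at the previous sync, and drop whatever the timeline itself shows. A toy restatement of that calculation (plain dicts and illustrative event IDs, not Synapse's `_calculate_state` itself):

```python
def calculate_state(timeline_contains, timeline_start, timeline_end, previous_timeline_end):
    timeline_contains_ids = set(timeline_contains.values())
    state_ids = (
        (set(timeline_end.values()) | set(timeline_start.values()))
        - set(previous_timeline_end.values())
        - timeline_contains_ids
    )
    # Map the surviving event IDs back to their (type, state_key) slots.
    id_to_key = {e: k for k, e in {**timeline_start, **timeline_end}.items()}
    return {id_to_key[e]: e for e in state_ids}

state = calculate_state(
    timeline_contains={},
    timeline_start={("m.room.topic", ""): "$S0"},
    timeline_end={("m.room.topic", ""): "$S2"},
    previous_timeline_end={("m.room.topic", ""): "$S0"},
)
print(state)  # {('m.room.topic', ''): '$S2'}; S2 is new to the client
```
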
@@ -2746,7 +2925,7 @@ class SyncResultBuilder:
         if self.since_token:
             for joined_sync in self.joined:
                 it = itertools.chain(
-                    joined_sync.timeline.events, joined_sync.state.values()
+                    joined_sync.state.values(), joined_sync.timeline.events
                 )
                 for event in it:
                     if event.type == EventTypes.Member:
@@ -2758,13 +2937,20 @@ class SyncResultBuilder:
                             newly_joined_or_invited_or_knocked_users.add(
                                 event.state_key
                             )
+                            # If the user left and rejoined in the same batch, they
+                            # count as a newly-joined user, *not* a newly-left user.
+                            newly_left_users.discard(event.state_key)
                         else:
                             prev_content = event.unsigned.get("prev_content", {})
                             prev_membership = prev_content.get("membership", None)
                             if prev_membership == Membership.JOIN:
                                 newly_left_users.add(event.state_key)
+                                # If the user joined and left in the same batch, they
+                                # count as a newly-left user, not a newly-joined user.
+                                newly_joined_or_invited_or_knocked_users.discard(
+                                    event.state_key
+                                )

         newly_left_users -= newly_joined_or_invited_or_knocked_users
         return newly_joined_or_invited_or_knocked_users, newly_left_users


@@ -2775,13 +2961,30 @@ class RoomSyncResultBuilder:

     Attributes:
         room_id

        rtype: One of `"joined"` or `"archived"`

        events: List of events to include in the room (more events may be added
            when generating result).

        newly_joined: If the user has newly joined the room

        full_state: Whether the full state should be sent in result

        since_token: Earliest point to return events from, or None
-       upto_token: Latest point to return events from.
+
+       upto_token: Latest point to return events from. If `events` is populated,
+           this is set to the token at the start of `events`
+
+       end_token: The last point in the timeline that the client should see events
+           from. Normally this will be the same as the global `now_token`, but in
+           the case of rooms where the user has left the room, this will be the point
+           just after their leave event.
+
+           This is used in the calculation of the state which is returned in `state`:
+           any state changes *up to* `end_token` (and not beyond!) which are not
+           reflected in the timeline need to be returned in `state`.
+
        out_of_band: whether the events in the room are "out of band" events
            and the server isn't in the room.
    """
@@ -2793,5 +2996,5 @@ class RoomSyncResultBuilder:
     full_state: bool
     since_token: Optional[StreamToken]
     upto_token: StreamToken
-
+    end_token: StreamToken
     out_of_band: bool = False