Mirror of https://github.com/element-hq/synapse.git

Compare commits: erikj/dock...v1.114.0rc (331 commits)
Commits (by SHA1):

1caff75526, 7b75922020, 26c1330764, 48303fcbcc, 53a3783750, b913aaa788, dab88a7b1f, 8678516e79, 573c6d7e69, 689641b903,
e75a23a63d, e563e4bdf3, f4032d3e71, 8da16e55fe, d9cc0faf4b, cca77af68f, 48742da536, 940b932405, a2b2f6d09b, defd4aca67,
b4d95409fb, f1a1c7fc53, cb9fa062b7, 74b75cfd54, 87d13fd143, ad2cd9aefd, ad0ee53993, 92b38c1afd, a8e313836d, 7c9684b5dc,
f1e8d2d15a, 10428046e4, 6eb98a4f1c, 950ba844f7, 8b8d74d12f, 261e746281, 993644ded0, a5d25bb623, f162c92f2a, 9ce489be5e,
fae75b0376, f77bfbfa30, 1892ba5f67, a51daffba5, b05b2e14bb, a308d99f30, a9fc1fd112, 6a11bdf01d, 8fea190a1f, 81c19c4cd2,
aaa3c36420, 3e7eb45eb1, bab37dfc6f, 9f9ec92526, ff7b27013e, e1f5f0fbb8, 8c9f2743bc, b076941a36, 8bbe65f3c0, b7faf01f26,
4f7f6ee9a0, a640b318df, 34b7586446, 70b0e38603, f31360e34b, 3ad38b644d, 44ac2aa3b6, 11db575218, 30e9f6e469, eb62d12063,
ceb3686dcd, 1dfa59b238, bef6568537, 244a255065, 932cb0a928, 2dad718265, 5d8446298c, d845e939a9, 23727869c7, c270355349,
e3db7b2d81, 2b620e0a15, 39731bb205, 1d6186265a, 46de0ee16b, b221f0b84b, b2c55bd049, ed583d9c81, f76dc9923c, 7e997fb8b1,
dbc2290cbe, 2f6b86e79a, 37f9876ccf, 8b449a8ce6, 53db8a914e, e4868f8a1e, dcad81082c, c56b070e6f, be726724a8, 62ae56a4ac,
808dab0699, 34306be5aa, be4a16ff44, 568051c0f0, ebbabfe782, 69ac4b6a6e, 729026e604, bdf37ad4c4, 8bbc98e66d, 4b9f4c2abf,
e8ee784c75, 48c1307911, d225b6b3eb, 1daae43f3a, a9ee832e48, 13a99fba1b, e3a0681ecf, de05a64246, d221512498, ed0face8ad,
73529d3732, 1648337775, dc8ddc6472, d3f9afd8d9, 43c865f7c9, 71d83477cb, 6a01af59e1, f583f1dce4, a574de0062, 3fee32ed6b,
5884f0a956, 79924aebef, 574aa53126, 83894180b2, 429ecb7564, 9e1acea051, 899d33f2ba, df11af14db, d88ba45db9, 14f2b1eb00,
2af729a193, 0de0689ae8, 4c44020838, 4f6194492a, ab62aa09da, fb66e938b2, 5a97bbd895, 606da398fc, 677142b6a9, 342f0c35b7,
5871daf877, 30e14c8510, 3232bc2982, 4ca13ce0dd, 8e229535fa, e0ff850cb7, 22fbc5be54, 1cf3ff6b40, 62d8b0361b, 4d6f7c0fc9,
d48061b7e6, 963a60c7e7, 8e7da35402, 028b103ae0, abb1384502, 0ed1c64c83, 1353fb3347, b15e17ce6e, 8cdd2d214e, 3fef535ff2,
62134dcc77, 23eed4f72a, 4721177241, 7879f288df, ccbd619b43, c896030f67, 4d7e53707c, cf69f8d59b, 20de685a4b, 8e9e6f1a0a,
57538eb4d9, 45b35f8eae, 2ec257d608, daeaeb2a7b, 7786ae7e1c, 22aeb78b77, a9d2e40ea4, 0c4f7a3d16, 75b788f49f, 7be03d854b,
fa91655805, 0d2b75cf92, ccce858c4a, 99c107920d, 1609855ff8, 8f890447b0, b905ae27ca, 1ce59d7ba0, b3b793786c, 9c8f1a6d41,
5b5280e3e5, 635e3927d2, a1b8897668, 76b9f14c0a, 1eccbfb82f, 2f5a77efae, b11f5c984b, 27756c9fdf, cc5e5893fe, 7c169f4d2c,
f75da9cc53, 75c19bf57a, b924a8e1a9, a8dcd686fb, 315b8d2032, 9f47513458, ef7fbdfebd, 9cf0ef9c70, a023538822, f79dbd0f61,
c89fea3fd1, 554a92601a, a98cb87bee, 6e8af83193, 805e6c9a8f, 3c61ddbbc9, ae4c236a6d, 930a64b6c1, 7a11c0ac4f, cf711ac03c,
700d2cc4a0, 1e74b50dc6, 7c2d8f1f01, 118b734081, 7a6186b888, 452a59f887, adeedb7b7c, 7c5fb13f7b, f8d57ce656, 13ed84c573,
4243c1f074, 3239b7459c, c99203d98c, 9104a9f0d0, a412a5829d, 7ef89b985d, bdf82efea5, afaf2d9388, 199223062a, 97c3d98816,
fa3adc896a, 79767a1108, 4af654f0da, 1c7d85fdfe, 5a65e8a0d1, 088992a484, d17d931a53, 334123f0cd, d8e81f67eb, 19a3d5b606,
52813a8d94, a5485437cf, e5b8a3e37f, e88332b5f4, edfb7aad3a, f983a77ab0, 12d7303707, a3cb244755, 3aae60f17b, 2c36a679ae,
c12ee0d5ba, 8aaff851b1, 8c58eb7f17, ebdce69f6a, c6eb99c878, 5db3eec5bc, f1c4dfb08b, 0edf1cacf7, d0f90bd04e, 0248ed70a9,
e6816babf6, a8069e9739, 863578bfcf, 9e59d18022, 491365f199, dad1559721, 8c4937b216, b84e31375b, 06953bc193, 265ee88f34,
ab94bce02c, 17d6c28285, 4a7c58642c, ce9385819b, a963f579de, 3f06bbc0ac, fcbc79bb87, aabf577166, 7d8f0ef351, eab0b548e4,
81cef38d4b, e2f8476044, 18c1196893, 8a3270075b, f458dff16d, 6b709c512d, 5c2a837e3c, 64f5a4a353, 7dd14fadb1, 5624c8b961,
4e3868dc46, d16910ca02, 225f378ffa, 8bd9ff0783, 466f344547, 726006cdf2, 967b6948b0, d7198dfb95, 94ef2f4f5d, bb5a692946,
ad179b0136, 5147ce294a, f35bc08d39, f2616edb73, 86a2a0258f, 0893ee9af8, 887f773472, 9edb725ebc, c97251d5ba, 7e2412265d,
7ef00b7628
.github/ISSUE_TEMPLATE.md (vendored): 2 changes

@@ -2,4 +2,4 @@
 (using a matrix.org account if necessary). We do not use GitHub issues for
 support.
 
-**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/
+**If you want to report a security issue** please see https://element.io/security/security-disclosure-policy
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored): 2 changes

@@ -7,7 +7,7 @@ body:
       **THIS IS NOT A SUPPORT CHANNEL!**
       **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
 
-      If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+      If you want to report a security issue, please see https://element.io/security/security-disclosure-policy
 
       This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue.
.github/workflows/docker.yml (vendored): 4 changes

@@ -30,7 +30,7 @@ jobs:
         run: docker buildx inspect
 
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.5.0
+        uses: sigstore/cosign-installer@v3.6.0
 
       - name: Checkout repository
         uses: actions/checkout@v4

@@ -72,7 +72,7 @@ jobs:
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           push: true
           labels: |
.github/workflows/docs-pr-netlify.yaml (vendored): 2 changes

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4
+        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
.github/workflows/fix_lint.yaml (vendored): 12 changes

@@ -29,17 +29,9 @@ jobs:
         with:
           install-project: "false"
 
-      - name: Import order (isort)
+      - name: Run ruff
         continue-on-error: true
-        run: poetry run isort .
-
-      - name: Code style (black)
-        continue-on-error: true
-        run: poetry run black .
-
-      - name: Semantic checks (ruff)
-        continue-on-error: true
-        run: poetry run ruff --fix .
+        run: poetry run ruff check --fix .
 
       - run: cargo clippy --all-features --fix -- -D warnings
        continue-on-error: true
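After this change the auto-fix job boils down to two local commands, taken directly from the hunk above (running `poetry run` assumes the project's Poetry environment is installed):

```sh
# Auto-fix Python lint issues with ruff, then let clippy fix Rust warnings.
poetry run ruff check --fix .
cargo clippy --all-features --fix -- -D warnings
```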
.github/workflows/release-artifacts.yml (vendored): 8 changes

@@ -102,7 +102,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-11]
+        os: [ubuntu-20.04, macos-12]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.

@@ -112,9 +112,9 @@ jobs:
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-11"
+            os: "macos-12"
           # Don't build aarch64 wheels on mac.
-          - os: "macos-11"
+          - os: "macos-12"
             arch: aarch64
           # Don't build aarch64 wheels on PR CI.
           - is_pr: true

@@ -130,7 +130,7 @@ jobs:
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.16.2
+        run: python -m pip install cibuildwheel==2.19.1
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
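As a hedged sketch, the same wheel build can be reproduced locally with the pinned cibuildwheel version (the `--platform linux` invocation is standard cibuildwheel usage and an assumption here, not something this workflow calls explicitly; it requires Docker):

```sh
# Build wheels locally with the same cibuildwheel version the workflow pins.
python -m pip install cibuildwheel==2.19.1
# cibuildwheel reads its build configuration from pyproject.toml.
cibuildwheel --platform linux
```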
.github/workflows/tests.yml (vendored): 38 changes

@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter

@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
             - '.github/workflows/tests.yml'
 
+          linting_readme:
+            - 'README.rst'
+
   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes

@@ -127,15 +131,8 @@ jobs:
       with:
         install-project: "false"
 
-      - name: Import order (isort)
-        run: poetry run isort --check --diff .
-
-      - name: Code style (black)
-        run: poetry run black --check --diff .
-
-      - name: Semantic checks (ruff)
-        # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+      - name: Check style
+        run: poetry run ruff check --output-format=github .
 
   lint-mypy:
     runs-on: ubuntu-latest

@@ -269,6 +266,20 @@ jobs:
       - run: cargo fmt --check
 
+  # This is to detect issues with the rst file, which can otherwise cause issues
+  # when uploading packages to PyPi.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped

@@ -284,9 +295,10 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
         with:
           needs: ${{ toJSON(needs) }}

@@ -301,6 +313,7 @@ jobs:
           lint-clippy
           lint-clippy-nightly
           lint-rustfmt
+          lint-readme
 
 
   calculate-test-jobs:

@@ -479,6 +492,9 @@ jobs:
     volumes:
       - ${{ github.workspace }}:/src
     env:
+      # If this is a pull request to a release branch, use that branch as default branch for sytest, else use develop
+      # This works because the release script always create a branch on the sytest repo with the same name as the release branch
+      SYTEST_DEFAULT_BRANCH: ${{ startsWith(github.base_ref, 'release-') && github.base_ref || 'develop' }}
       SYTEST_BRANCH: ${{ github.head_ref }}
       POSTGRES: ${{ matrix.job.postgres && 1}}
       MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}

@@ -714,7 +730,7 @@ jobs:
       - linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: matrix-org/done-action@v2
+      - uses: matrix-org/done-action@v3
         with:
           needs: ${{ toJSON(needs) }}
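The new `lint-readme` job can be reproduced locally with the same two commands it runs in CI (taken verbatim from the hunk above):

```sh
# Check README.rst for issues that would break the PyPI upload.
pip install rstcheck
rstcheck --report-level=WARNING README.rst
```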
CHANGES.md: 533 changes

@@ -1,3 +1,536 @@

# Synapse 1.114.0rc2 (2024-08-30)

### Features

- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509))
- Make `hash_password` accept password input from stdin. ([\#17608](https://github.com/element-hq/synapse/issues/17608))
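A minimal sketch of how the new stdin behaviour might be used; the `-c` config flag is a pre-existing `hash_password` option, and the exact invocation may differ:

```sh
# Hash a password non-interactively by piping it in on stdin (assumed usage).
echo -n 'correct horse battery staple' | hash_password -c homeserver.yaml
```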
### Bugfixes

- Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). ([\#17194](https://github.com/element-hq/synapse/issues/17194))
- Fix content-length on federation `/thumbnail` responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532))
- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17543](https://github.com/element-hq/synapse/issues/17543))

### Internal Changes

- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407))
- Refactor sliding sync class into multiple files. ([\#17595](https://github.com/element-hq/synapse/issues/17595))
- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599))
- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600))
- Add support to `@tag_args` for standalone functions. ([\#17604](https://github.com/element-hq/synapse/issues/17604))
- Speed up incremental syncs in sliding sync by adding some more caching. ([\#17606](https://github.com/element-hq/synapse/issues/17606))
- Always return the user's own read receipts in sliding sync. ([\#17617](https://github.com/element-hq/synapse/issues/17617))
- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620))
- Refactor sliding sync code to move room list logic out into a separate class. ([\#17622](https://github.com/element-hq/synapse/issues/17622))

### Updates to locked dependencies

* Bump attrs from 23.2.0 to 24.2.0. ([\#17609](https://github.com/element-hq/synapse/issues/17609))
* Bump cryptography from 42.0.8 to 43.0.0. ([\#17584](https://github.com/element-hq/synapse/issues/17584))
* Bump phonenumbers from 8.13.43 to 8.13.44. ([\#17610](https://github.com/element-hq/synapse/issues/17610))
* Bump pygithub from 2.3.0 to 2.4.0. ([\#17612](https://github.com/element-hq/synapse/issues/17612))
* Bump pyyaml from 6.0.1 to 6.0.2. ([\#17611](https://github.com/element-hq/synapse/issues/17611))
* Bump sentry-sdk from 2.12.0 to 2.13.0. ([\#17585](https://github.com/element-hq/synapse/issues/17585))
* Bump serde from 1.0.206 to 1.0.208. ([\#17581](https://github.com/element-hq/synapse/issues/17581))
* Bump serde from 1.0.208 to 1.0.209. ([\#17613](https://github.com/element-hq/synapse/issues/17613))
* Bump serde_json from 1.0.124 to 1.0.125. ([\#17582](https://github.com/element-hq/synapse/issues/17582))
* Bump serde_json from 1.0.125 to 1.0.127. ([\#17614](https://github.com/element-hq/synapse/issues/17614))
* Bump types-jsonschema from 4.23.0.20240712 to 4.23.0.20240813. ([\#17583](https://github.com/element-hq/synapse/issues/17583))
* Bump types-setuptools from 71.1.0.20240726 to 71.1.0.20240818. ([\#17586](https://github.com/element-hq/synapse/issues/17586))

# Synapse 1.114.0rc1 (2024-08-20)

### Features

- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled (see the example after this list). ([\#17571](https://github.com/element-hq/synapse/issues/17571))
- Handle changes in `timeline_limit` in experimental sliding sync. ([\#17579](https://github.com/element-hq/synapse/issues/17579))
- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592))
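A quick way to check whether a server advertises the flag from the first entry above might look like this, assuming the flag is exposed under `unstable_features` and using `jq` for readability (both assumptions; the server name is a placeholder):

```sh
# Query the /versions endpoint and extract the sliding-sync support flag.
curl -s 'https://matrix.example.org/_matrix/client/versions' \
  | jq '.unstable_features["org.matrix.simplified_msc3575"]'
```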
### Bugfixes

- Start handlers for new media endpoints when media resource configured. ([\#17483](https://github.com/element-hq/synapse/issues/17483))
- Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17510](https://github.com/element-hq/synapse/issues/17510))
- Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. ([\#17535](https://github.com/element-hq/synapse/issues/17535))
- Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17538](https://github.com/element-hq/synapse/issues/17538))
- Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. ([\#17545](https://github.com/element-hq/synapse/issues/17545))
- Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. ([\#17568](https://github.com/element-hq/synapse/issues/17568))
- Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint. ([\#17570](https://github.com/element-hq/synapse/issues/17570))

### Improved Documentation

- Clarify default behaviour of the [`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) option. ([\#17515](https://github.com/element-hq/synapse/issues/17515))
- Improve docstrings for profile methods. ([\#17559](https://github.com/element-hq/synapse/issues/17559))

### Internal Changes

- Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17514](https://github.com/element-hq/synapse/issues/17514))
- Fix up comment in sliding sync implementation. ([\#17531](https://github.com/element-hq/synapse/issues/17531))
- Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. ([\#17536](https://github.com/element-hq/synapse/issues/17536))
- Fix performance of device lists in `/key/changes` and sliding sync. ([\#17537](https://github.com/element-hq/synapse/issues/17537), [\#17548](https://github.com/element-hq/synapse/issues/17548))
- Bump setuptools from 67.6.0 to 72.1.0. ([\#17542](https://github.com/element-hq/synapse/issues/17542))
- Add a utility function for generating random event IDs. ([\#17557](https://github.com/element-hq/synapse/issues/17557))
- Speed up responding to media requests. ([\#17558](https://github.com/element-hq/synapse/issues/17558), [\#17561](https://github.com/element-hq/synapse/issues/17561), [\#17564](https://github.com/element-hq/synapse/issues/17564), [\#17566](https://github.com/element-hq/synapse/issues/17566), [\#17567](https://github.com/element-hq/synapse/issues/17567), [\#17569](https://github.com/element-hq/synapse/issues/17569))
- Test github token before running release script steps. ([\#17562](https://github.com/element-hq/synapse/issues/17562))
- Reduce log spam of multipart files. ([\#17563](https://github.com/element-hq/synapse/issues/17563))
- Refactor per-connection state in experimental sliding sync handler. ([\#17574](https://github.com/element-hq/synapse/issues/17574))
- Add histogram metrics for sliding sync processing time. ([\#17593](https://github.com/element-hq/synapse/issues/17593))

### Updates to locked dependencies

* Bump bytes from 1.6.1 to 1.7.1. ([\#17526](https://github.com/element-hq/synapse/issues/17526))
* Bump lxml from 5.2.2 to 5.3.0. ([\#17550](https://github.com/element-hq/synapse/issues/17550))
* Bump phonenumbers from 8.13.42 to 8.13.43. ([\#17551](https://github.com/element-hq/synapse/issues/17551))
* Bump regex from 1.10.5 to 1.10.6. ([\#17527](https://github.com/element-hq/synapse/issues/17527))
* Bump sentry-sdk from 2.10.0 to 2.12.0. ([\#17553](https://github.com/element-hq/synapse/issues/17553))
* Bump serde from 1.0.204 to 1.0.206. ([\#17556](https://github.com/element-hq/synapse/issues/17556))
* Bump serde_json from 1.0.122 to 1.0.124. ([\#17555](https://github.com/element-hq/synapse/issues/17555))
* Bump sigstore/cosign-installer from 3.5.0 to 3.6.0. ([\#17549](https://github.com/element-hq/synapse/issues/17549))
* Bump types-pyyaml from 6.0.12.20240311 to 6.0.12.20240808. ([\#17552](https://github.com/element-hq/synapse/issues/17552))
* Bump types-requests from 2.31.0.20240406 to 2.32.0.20240712. ([\#17524](https://github.com/element-hq/synapse/issues/17524))

# Synapse 1.113.0 (2024-08-13)

No significant changes since 1.113.0rc1.

# Synapse 1.113.0rc1 (2024-08-06)

### Features

- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447))
- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477))
- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489))
- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505))

### Bugfixes

- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450))
- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499))

### Improved Documentation

- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476))

### Internal Changes

- Change sliding sync to use their own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452))
- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478))
- Do not send empty room entries down the experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479))
- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482))
- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501))
- Split and move Sliding Sync tests so we have some more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504))
- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507))
- Make `timeline` assertions in Sliding Sync tests easier to understand. ([\#17511](https://github.com/element-hq/synapse/issues/17511))
- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529))

### Updates to locked dependencies

* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495))
* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522))
* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521))
* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494))
* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493))
* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525))
* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523))
* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496))
* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497))

# Synapse 1.112.0 (2024-07-30)

This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).

Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0.

This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.

With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.

Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.

**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.
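Putting the advice above together, a pip-based deployment would upgrade roughly like this, inside the virtualenv that runs Synapse:

```sh
# Upgrade Synapse itself, then explicitly pin Twisted to the patched release
# candidate, since pip will not pick up a pre-release on its own.
pip install -U matrix-synapse
pip install Twisted==24.7.0rc1
```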
### Internal Changes

- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))

# Synapse 1.112.0rc1 (2024-07-23)

Please note that this release candidate does not include the security dependency update included in version 1.111.1 as this version was released before 1.111.1. The same security fix can be found in the full release of 1.112.0.

### Features

- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416))
- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418))
- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419))
- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429))
- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432))
- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433))
- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454))

### Bugfixes

- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434))
- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435))
- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438))

### Improved Documentation

- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387))
- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423))
- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451))

### Internal Changes

- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424))
- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426))
- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428))
- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439))
- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449))
- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453))
- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458))
- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460))
- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461))
- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468))
- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469))

### Updates to locked dependencies

* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441))
* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464))
* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444))
* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440))
* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445))
* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465))
* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466))
* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456))
* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467))
* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448))
* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443))
* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446))
* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442))
* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427))

# Synapse 1.111.1 (2024-07-30)

This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7).

This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request.
If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality.

With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**.
The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration.

Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today.

**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time.

### Internal Changes

- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502))

# Synapse 1.111.0 (2024-07-16)

No significant changes since 1.111.0rc2.

# Synapse 1.111.0rc2 (2024-07-10)

### Bugfixes

- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420))

### Improved Documentation

- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421))

### Internal Changes

- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422))

# Synapse 1.111.0rc1 (2024-07-09)

### Features

- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320))
- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337))
- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint (see the sketch after this list). ([\#17365](https://github.com/element-hq/synapse/issues/17365))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388))
- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395))
- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400))
- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403))
- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406))
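As referenced in the MSC3916 download entry above, a hedged sketch of fetching media through the new authenticated client endpoint (the server name, media ID, and access token are placeholders):

```sh
# Download media over the authenticated client endpoint; requires an access token.
curl -H "Authorization: Bearer $ACCESS_TOKEN" \
  -o media.bin \
  'https://matrix.example.org/_matrix/client/v1/media/download/matrix.example.org/SomeMediaId'
```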
### Bugfixes

- Fix rare race which caused no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362))
- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398))

### Improved Documentation

- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356))
- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379))
- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399))

### Internal Changes

- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318))
- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))
- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367))
- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411))
- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390))
- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410))
- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393))

### Updates to locked dependencies

* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404))
* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382))
* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413))
* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384))
* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414))
* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412))
* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415))
* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381))
* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409))
* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408))
* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380))

# Synapse 1.110.0 (2024-07-03)

No significant changes since 1.110.0rc3.

# Synapse 1.110.0rc3 (2024-07-02)

### Bugfixes

- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))

### Internal Changes

- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))

# Synapse 1.110.0rc2 (2024-06-26)

### Internal Changes

- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363))

# Synapse 1.110.0rc1 (2024-06-26)

### Features

- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187))
- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255))
- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256))
- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296))
- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276))
- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277))
- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281))
- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282))
- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284))
- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293))
- `register_new_matrix_user` now supports a `--password-file` flag, which is useful for scripting (see the sketch after this list). ([\#17294](https://github.com/element-hq/synapse/issues/17294))
- `register_new_matrix_user` now supports a `--exists-ok` flag to allow registration of users that already exist in the database. This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
- Add support for the `via` query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
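As referenced above, the two new `register_new_matrix_user` flags combine naturally in provisioning scripts. A hedged sketch follows; the `-c`, `-u`, and `--no-admin` flags and the server URL argument are pre-existing options, and the password file path is a placeholder:

```sh
# Idempotently provision a user: read the password from a file and tolerate
# the account already existing.
register_new_matrix_user -c homeserver.yaml -u alice --no-admin \
  --password-file /run/secrets/alice_password --exists-ok \
  http://localhost:8008
```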
### Bugfixes

- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254))
- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272))
- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283))
- Fix edge case in `/sync` returning the wrong state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295))
- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301))
- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336))

### Improved Documentation

- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308))
- Update header in the README to visually fix the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329))
- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341))
- Add default values for `rc_invites.per_issuer` to docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347))
- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348))

### Internal Changes

- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198))
- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265))
- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266))
- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271))
- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273))
- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279))
- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300))
- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324))
- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC (see the sketch after this list). ([\#17331](https://github.com/element-hq/synapse/issues/17331))
- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358))
- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338))
- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339))
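Given the path change above, a hedged sketch of hitting the renamed endpoint under the standard unstable prefix; the request body shape loosely follows MSC3575 and may not match Synapse's simplified variant exactly (server name and token are placeholders):

```sh
# POST a minimal sliding-sync request to the renamed unstable endpoint.
curl -X POST -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d '{"lists": {"rooms": {"ranges": [[0, 10]], "required_state": [], "timeline_limit": 1}}}' \
  'https://matrix.example.org/_matrix/client/unstable/org.matrix.simplified_msc3575/sync'
```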
### Updates to locked dependencies

* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343))
* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289))
* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313))
* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312))
* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287))
* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355))
* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317))
* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353))
* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352))
* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315))
* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290))
* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345))
* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263))
* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351))
* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344))
* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297))
* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288))
* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314))
* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285))
* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316))
* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354))
* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346))

# Synapse 1.109.0 (2024-06-18)

### Internal Changes

- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319))


# Synapse 1.109.0rc3 (2024-06-17)

### Bugfixes

- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309))

### Internal Changes

- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306))


# Synapse 1.109.0rc2 (2024-06-11)

### Bugfixes

- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))

# Synapse 1.109.0rc1 (2024-06-04)

### Features

- Add the ability to auto-accept invites on behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))

### Bugfixes

- Fix deduplication of membership events to avoid creating unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
- Fix bug where duplicate events could be sent down sync when using workers that are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
- Fix reporting of default tags to Sentry, such as worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
- Fix bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))

### Improved Documentation

- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))

### Internal Changes

- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
- Log exceptions when failing to auto-join a new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
- Reduce work of calculating outbound device lists updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
- Improve performance of calculating device lists changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
- Replace all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
- Clean out invalid destinations from the `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
- Stop logging errors when receiving invalid user IDs in key query requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))

### Updates to locked dependencies

* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))

# Synapse 1.108.0 (2024-05-28)

No significant changes since 1.108.0rc1.


# Synapse 1.108.0rc1 (2024-05-21)

### Features
Cargo.lock (generated, 33 changes)
@@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"

 [[package]]
 name = "bytes"
-version = "1.6.0"
+version = "1.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"

 [[package]]
 name = "cfg-if"
@@ -212,9 +212,9 @@ dependencies = [

 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"

 [[package]]
 name = "libc"
@@ -234,9 +234,9 @@ dependencies = [

 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

 [[package]]
 name = "memchr"
@@ -444,9 +444,9 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.10.4"
+version = "1.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

 [[package]]
 name = "serde"
-version = "1.0.202"
+version = "1.0.209"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.202"
+version = "1.0.209"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -505,11 +505,12 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.127"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
 dependencies = [
  "itoa",
+ "memchr",
  "ryu",
  "serde",
 ]
@@ -597,9 +598,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"

 [[package]]
 name = "ulid"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34778c17965aa2a08913b57e1f34db9b4a63f5de31768b55bf20d2795f921259"
+checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289"
 dependencies = [
  "getrandom",
  "rand",
README.rst (71 changes)
@@ -1,21 +1,34 @@
-=========================================================================
-Synapse |support| |development| |documentation| |license| |pypi| |python|
-=========================================================================
+.. image:: ./docs/element_logo_white_bg.svg
+   :height: 60px

-Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
-maintained by the Matrix.org Foundation. We began rapid development in 2014,
-reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
-in earnest today.
+**Element Synapse - Matrix homeserver implementation**

-Briefly, Matrix is an open standard for communications on the internet, supporting
-federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
-Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
-<https://spec.matrix.org/>`_ describes the technical details.
+|support| |development| |documentation| |license| |pypi| |python|
+
+Synapse is an open source `Matrix <https://matrix.org>`__ homeserver
+implementation, written and maintained by `Element <https://element.io>`_.
+`Matrix <https://github.com/matrix-org>`__ is the open standard for
+secure and interoperable real time communications. You can directly run
+and manage the source code in this repository, available under an AGPL
+license. There is no support provided from Element unless you have a
+subscription.
+
+Subscription alternative
+========================
+
+Alternatively, for those that need an enterprise-ready solution, Element
+Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_.
+ESS builds on Synapse to offer a complete Matrix-based backend including the full
+`Admin Console product <https://element.io/enterprise-functionality/admin-console>`_,
+giving admins the power to easily manage an organization-wide
+deployment. It includes advanced identity management, auditing,
+moderation and data retention options as well as Long Term Support and
+SLAs. ESS can be used to support any Matrix-based frontend client.

 .. contents::

-Installing and configuration
-============================
+🛠️ Installing and configuration
+===============================

 The Synapse documentation describes `how to install Synapse <https://element-hq.github.io/synapse/latest/setup/installation.html>`_. We recommend using
 `Docker images <https://element-hq.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
@@ -105,8 +118,8 @@ Following this advice ensures that even if an XSS is found in Synapse, the
 impact to other applications will be minimal.


-Testing a new installation
-==========================
+🧪 Testing a new installation
+=============================

 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client.
@@ -159,8 +172,20 @@ the form of::
 As when logging in, you will need to specify a "Custom server". Specify your
 desired ``localpart`` in the 'User name' box.

-Troubleshooting and support
-===========================
+🎯 Troubleshooting and support
+==============================
+
+🚀 Professional support
+-----------------------
+
+Enterprise quality support for Synapse including SLAs is available as part of an
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.
+
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.
+
+🤝 Community support
+--------------------

 The `Admin FAQ <https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html>`_
 includes tips on dealing with some common problems. For more details, see
@@ -176,8 +201,8 @@ issues for support requests, only for bug reports and feature requests.
 .. |docs| replace:: ``docs``
 .. _docs: docs

-Identity Servers
-================
+🪪 Identity Servers
+===================

 Identity servers have the job of mapping email addresses and other 3rd Party
 IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
@@ -206,8 +231,8 @@ an email address with your account, or send an invite to another user via their
 email address.


-Development
-===========
+🛠️ Development
+==============

 We welcome contributions to Synapse from the community!
 The best place to get started is our
@@ -225,8 +250,8 @@ Alongside all that, join our developer community on Matrix:
 `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!


-.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
-   :alt: (get support on #synapse:matrix.org)
+.. |support| image:: https://img.shields.io/badge/matrix-community%20support-success
+   :alt: (get community support in #synapse:matrix.org)
    :target: https://matrix.to/#/#synapse:matrix.org

 .. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
@@ -1 +0,0 @@
-Add the ability to auto-accept invites on the behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details.
@@ -1 +0,0 @@
-Update OIDC documentation: by default Matrix doesn't query userinfo endpoint, then claims should be put on id_token.
@@ -1 +0,0 @@
-Reduce work of calculating outbound device lists updates.
@@ -1 +0,0 @@
-Improve performance of calculating device lists changes in `/sync`.
debian/changelog (vendored, 115 changes)
@@ -1,3 +1,118 @@
+matrix-synapse-py3 (1.114.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.114.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 30 Aug 2024 15:35:13 +0100
+
+matrix-synapse-py3 (1.114.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.114.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Aug 2024 12:55:28 +0000
+
+matrix-synapse-py3 (1.113.0) stable; urgency=medium
+
+  * New Synapse release 1.113.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Aug 2024 14:36:56 +0100
+
+matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.113.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Aug 2024 12:23:23 +0100
+
+matrix-synapse-py3 (1.112.0) stable; urgency=medium
+
+  * New Synapse release 1.112.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 17:15:48 +0100
+
+matrix-synapse-py3 (1.112.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.112.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Jul 2024 08:58:55 -0600
+
+matrix-synapse-py3 (1.111.1) stable; urgency=medium
+
+  * New Synapse release 1.111.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Jul 2024 16:13:52 +0100
+
+matrix-synapse-py3 (1.111.0) stable; urgency=medium
+
+  * New Synapse release 1.111.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Jul 2024 12:42:46 +0200
+
+matrix-synapse-py3 (1.111.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.111.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 10 Jul 2024 08:46:54 +0000
+
+matrix-synapse-py3 (1.111.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.111.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Jul 2024 09:49:25 +0000
+
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
+matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 18:14:48 +0200
+
+matrix-synapse-py3 (1.110.0~rc1) stable; urgency=medium
+
+  * `register_new_matrix_user` now supports a --password-file and a --exists-ok flag.
+  * New Synapse release 1.110.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Jun 2024 14:07:56 +0200
+
+matrix-synapse-py3 (1.109.0) stable; urgency=medium
+
+  * New synapse release 1.109.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jun 2024 09:45:15 +0000
+
+matrix-synapse-py3 (1.109.0~rc3) stable; urgency=medium
+
+  * New synapse release 1.109.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Jun 2024 12:05:24 +0000
+
+matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.109.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2024 13:20:17 +0000
+
+matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.109.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jun 2024 09:42:46 +0100
+
+matrix-synapse-py3 (1.108.0) stable; urgency=medium
+
+  * New Synapse release 1.108.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 May 2024 11:54:22 +0100
+
 matrix-synapse-py3 (1.108.0~rc1) stable; urgency=medium

   * New Synapse release 1.108.0rc1.
debian/hash_password.1 (vendored, 27 changes)
@@ -1,10 +1,13 @@
-.\" generated with Ronn-NG/v0.8.0
-.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
-.TH "HASH_PASSWORD" "1" "July 2021" "" ""
+.\" generated with Ronn-NG/v0.10.1
+.\" http://github.com/apjanke/ronn-ng/tree/0.10.1
+.TH "HASH_PASSWORD" "1" "August 2024" ""
 .SH "NAME"
 \fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
 .SH "SYNOPSIS"
-\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
+.TS
+allbox;
+\fBhash_password\fR [\fB\-p\fR \fB\-\-password\fR [password]] [\fB\-c\fR \fB\-\-config\fR \fIfile\fR]
+.TE
 .SH "DESCRIPTION"
 \fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
 .P
@@ -20,7 +23,7 @@ bcrypt_rounds: 17 password_config: pepper: "random hashing pepper"
 .SH "OPTIONS"
 .TP
 \fB\-p\fR, \fB\-\-password\fR
-Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
+Read the password form the command line if [password] is supplied, or from \fBSTDIN\fR\. If not, prompt the user and read the password from the tty prompt\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
 .TP
 \fB\-c\fR, \fB\-\-config\fR
 Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
@@ -33,7 +36,17 @@ $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
 .fi
 .IP "" 0
 .P
-Hash from the STDIN:
+Hash from the stdin:
+.IP "" 4
+.nf
+$ cat password_file | hash_password
+Password:
+Confirm password:
+$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
+.fi
+.IP "" 0
+.P
+Hash from the prompt:
 .IP "" 4
 .nf
 $ hash_password
@@ -53,6 +66,6 @@ $2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
 .fi
 .IP "" 0
 .SH "COPYRIGHT"
-This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
+This man page was written by Rahul De «rahulde@swecha\.net» for Debian GNU/Linux distribution\.
 .SH "SEE ALSO"
 synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
debian/hash_password.1.html (vendored, new file, 182 lines)
@@ -0,0 +1,182 @@
<!DOCTYPE html>
<html>
<head>
  <meta http-equiv='content-type' content='text/html;charset=utf-8'>
  <meta name='generator' content='Ronn-NG/v0.10.1 (http://github.com/apjanke/ronn-ng/tree/0.10.1)'>
  <title>hash_password(1) - Calculate the hash of a new password, so that passwords can be reset</title>
  <style type='text/css' media='all'>
  /* style: man */
  body#manpage {margin:0}
  .mp {max-width:100ex;padding:0 9ex 1ex 4ex}
  .mp p,.mp pre,.mp ul,.mp ol,.mp dl {margin:0 0 20px 0}
  .mp h2 {margin:10px 0 0 0}
  .mp > p,.mp > pre,.mp > ul,.mp > ol,.mp > dl {margin-left:8ex}
  .mp h3 {margin:0 0 0 4ex}
  .mp dt {margin:0;clear:left}
  .mp dt.flush {float:left;width:8ex}
  .mp dd {margin:0 0 0 9ex}
  .mp h1,.mp h2,.mp h3,.mp h4 {clear:left}
  .mp pre {margin-bottom:20px}
  .mp pre+h2,.mp pre+h3 {margin-top:22px}
  .mp h2+pre,.mp h3+pre {margin-top:5px}
  .mp img {display:block;margin:auto}
  .mp h1.man-title {display:none}
  .mp,.mp code,.mp pre,.mp tt,.mp kbd,.mp samp,.mp h3,.mp h4 {font-family:monospace;font-size:14px;line-height:1.42857142857143}
  .mp h2 {font-size:16px;line-height:1.25}
  .mp h1 {font-size:20px;line-height:2}
  .mp {text-align:justify;background:#fff}
  .mp,.mp code,.mp pre,.mp pre code,.mp tt,.mp kbd,.mp samp {color:#131211}
  .mp h1,.mp h2,.mp h3,.mp h4 {color:#030201}
  .mp u {text-decoration:underline}
  .mp code,.mp strong,.mp b {font-weight:bold;color:#131211}
  .mp em,.mp var {font-style:italic;color:#232221;text-decoration:none}
  .mp a,.mp a:link,.mp a:hover,.mp a code,.mp a pre,.mp a tt,.mp a kbd,.mp a samp {color:#0000ff}
  .mp b.man-ref {font-weight:normal;color:#434241}
  .mp pre {padding:0 4ex}
  .mp pre code {font-weight:normal;color:#434241}
  .mp h2+pre,h3+pre {padding-left:0}
  ol.man-decor,ol.man-decor li {margin:3px 0 10px 0;padding:0;float:left;width:33%;list-style-type:none;text-transform:uppercase;color:#999;letter-spacing:1px}
  ol.man-decor {width:100%}
  ol.man-decor li.tl {text-align:left}
  ol.man-decor li.tc {text-align:center;letter-spacing:4px}
  ol.man-decor li.tr {text-align:right;float:right}
  </style>
</head>
<!--
  The following styles are deprecated and will be removed at some point:
  div#man, div#man ol.man, div#man ol.head, div#man ol.man.

  The .man-page, .man-decor, .man-head, .man-foot, .man-title, and
  .man-navigation should be used instead.
-->
<body id='manpage'>
<div class='mp' id='man'>

<div class='man-navigation' style='display:none'>
  <a href="#NAME">NAME</a>
  <a href="#SYNOPSIS">SYNOPSIS</a>
  <a href="#DESCRIPTION">DESCRIPTION</a>
  <a href="#FILES">FILES</a>
  <a href="#OPTIONS">OPTIONS</a>
  <a href="#EXAMPLES">EXAMPLES</a>
  <a href="#COPYRIGHT">COPYRIGHT</a>
  <a href="#SEE-ALSO">SEE ALSO</a>
</div>

<ol class='man-decor man-head man head'>
  <li class='tl'>hash_password(1)</li>
  <li class='tc'></li>
  <li class='tr'>hash_password(1)</li>
</ol>

<h2 id="NAME">NAME</h2>
<p class="man-name">
  <code>hash_password</code> - <span class="man-whatis">Calculate the hash of a new password, so that passwords can be reset</span>
</p>
<h2 id="SYNOPSIS">SYNOPSIS</h2>

<table>
  <tbody>
    <tr>
      <td>
        <code>hash_password</code> [<code>-p</code>
      </td>
      <td>
        <code>--password</code> [password]] [<code>-c</code>
      </td>
      <td>
        <code>--config</code> <var>file</var>]</td>
    </tr>
  </tbody>
</table>

<h2 id="DESCRIPTION">DESCRIPTION</h2>

<p><strong>hash_password</strong> calculates the hash of a supplied password using bcrypt.</p>

<p><code>hash_password</code> takes a password as an parameter either on the command line
or the <code>STDIN</code> if not supplied.</p>

<p>It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
value used for the hashing. By default <code>bcrypt_rounds</code> is set to <strong>12</strong>.</p>

<p>The hashed password is written on the <code>STDOUT</code>.</p>

<h2 id="FILES">FILES</h2>

<p>A sample YAML file accepted by <code>hash_password</code> is described below:</p>

<p>bcrypt_rounds: 17
password_config:
pepper: "random hashing pepper"</p>

<h2 id="OPTIONS">OPTIONS</h2>

<dl>
<dt>
<code>-p</code>, <code>--password</code>
</dt>
<dd>Read the password form the command line if [password] is supplied, or from <code>STDIN</code>.
If not, prompt the user and read the password from the tty prompt.
It is not recommended to type the password on the command line
directly. Use the STDIN instead.</dd>
<dt>
<code>-c</code>, <code>--config</code>
</dt>
<dd>Read the supplied YAML <var>file</var> containing the options <code>bcrypt_rounds</code>
and the <code>password_config</code> section containing the <code>pepper</code> value.</dd>
</dl>

<h2 id="EXAMPLES">EXAMPLES</h2>

<p>Hash from the command line:</p>

<pre><code>$ hash_password -p "p@ssw0rd"
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe
</code></pre>

<p>Hash from the stdin:</p>

<pre><code>$ cat password_file | hash_password
Password:
Confirm password:
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
</code></pre>

<p>Hash from the prompt:</p>

<pre><code>$ hash_password
Password:
Confirm password:
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
</code></pre>

<p>Using a config file:</p>

<pre><code>$ hash_password -c config.yml
Password:
Confirm password:
$2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O
</code></pre>

<h2 id="COPYRIGHT">COPYRIGHT</h2>

<p>This man page was written by Rahul De «rahulde@swecha.net»
for Debian GNU/Linux distribution.</p>

<h2 id="SEE-ALSO">SEE ALSO</h2>

<p><span class="man-ref">synctl<span class="s">(1)</span></span>, <span class="man-ref">synapse_port_db<span class="s">(1)</span></span>, <span class="man-ref">register_new_matrix_user<span class="s">(1)</span></span>, <span class="man-ref">synapse_review_recent_signups<span class="s">(1)</span></span></p>

<ol class='man-decor man-foot man foot'>
  <li class='tl'></li>
  <li class='tc'>August 2024</li>
  <li class='tr'>hash_password(1)</li>
</ol>

</div>
</body>
</html>
debian/hash_password.ronn (vendored, 13 changes)
@@ -29,8 +29,8 @@ A sample YAML file accepted by `hash_password` is described below:
 ## OPTIONS

 * `-p`, `--password`:
-  Read the password form the command line if [password] is supplied.
-  If not, prompt the user and read the password form the `STDIN`.
+  Read the password form the command line if [password] is supplied, or from `STDIN`.
+  If not, prompt the user and read the password from the tty prompt.
   It is not recommended to type the password on the command line
   directly. Use the STDIN instead.

@@ -45,7 +45,14 @@ Hash from the command line:
     $ hash_password -p "p@ssw0rd"
     $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe

-Hash from the STDIN:
+Hash from the stdin:
+
+    $ cat password_file | hash_password
+    Password:
+    Confirm password:
+    $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG
+
+Hash from the prompt:

     $ hash_password
     Password:
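For reference, the scheme these `hash_password` pages describe (bcrypt over the supplied password with the configured pepper appended, 12 rounds by default) can be sketched in Python. This is a minimal illustration based on the man page's wording, not a copy of Synapse's implementation; the function name is invented.

```python
import bcrypt

def hash_password(password: str, pepper: str = "", bcrypt_rounds: int = 12) -> str:
    # Assumption from the man page: the pepper from password_config is
    # appended to the password before bcrypt hashing.
    salt = bcrypt.gensalt(rounds=bcrypt_rounds)
    return bcrypt.hashpw((password + pepper).encode("utf8"), salt).decode("ascii")

print(hash_password("p@ssw0rd"))  # e.g. $2b$12$...
```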
debian/register_new_matrix_user.ronn (vendored, 11 changes)
@@ -31,8 +31,12 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
   Local part of the new user. Will prompt if omitted.

 * `-p`, `--password`:
-  New password for user. Will prompt if omitted. Supplying the password
-  on the command line is not recommended. Use the STDIN instead.
+  New password for user. Will prompt if this option and `--password-file` are omitted.
+  Supplying the password on the command line is not recommended.
+
+* `--password-file`:
+  File containing the new password for user. If set, overrides `--password`.
+  This is a more secure alternative to specifying the password on the command line.

 * `-a`, `--admin`:
   Register new user as an admin. Will prompt if omitted.
@@ -44,6 +48,9 @@ A sample YAML file accepted by `register_new_matrix_user` is described below:
   Shared secret as defined in server config file. This is an optional
   parameter as it can be also supplied via the YAML file.

+* `--exists-ok`:
+  Do not fail if the user already exists. The user account will not be updated in this case.
+
 * `server_url`:
   URL of the home server. Defaults to 'https://localhost:8448'.
debian/templates (vendored, 2 changes)
@@ -5,7 +5,7 @@ _Description: Name of the server:
  servers via federation. This is normally the public hostname of the
  server running synapse, but can be different if you set up delegation.
  Please refer to the delegation documentation in this case:
- https://github.com/element-hq/synapse/blob/master/docs/delegate.md.
+ https://element-hq.github.io/synapse/latest/delegate.html.

 Template: matrix-synapse/report-stats
 Type: boolean
@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
 ###
 # We hardcode the use of Debian bookworm here because this could change upstream
 # and other Dockerfiles used for testing are expecting bookworm.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements

 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder

 # install the OS build deps
 RUN \
@@ -24,7 +24,7 @@ ARG distro=""
 # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
 # it's not obviously easier to use that than to build our own.)

-FROM docker.io/library/${distro} as builder
+FROM docker.io/library/${distro} AS builder

 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
@@ -73,6 +73,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     curl \
     debhelper \
     devscripts \
+    # Required for building cffi from source.
+    libffi-dev \
     libsystemd-dev \
     lsb-release \
     pkg-config \
@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
 cp -aT /synapse/source /synapse/build
 cd /synapse/build

+# Delete any existing `.so` files to ensure a clean build.
+rm -f /synapse/build/synapse/*.so
+
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the
@@ -105,8 +105,6 @@ experimental_features:
   # Expose a room summary for public rooms
   msc3266_enabled: true

-  msc4115_membership_on_events: true
-
 server_notices:
   system_mxid_localpart: _server
   system_mxid_display_name: "Server Alert"
@@ -176,7 +176,6 @@ app_service_config_files:
 {% endif %}

 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
-expire_access_token: False

 ## Signing Keys ##
@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
             "^/_matrix/media/",
             "^/_synapse/admin/v1/purge_media_cache$",
@@ -125,6 +125,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/user/.*/media.*$",
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
+            "^/_matrix/client/v1/media/.*$",
+            "^/_matrix/federation/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
@@ -2,13 +2,9 @@

 This API allows a server administrator to enable or disable some experimental features on a per-user
 basis. The currently supported features are:
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
-presence state enabled
 - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
 for another client
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys.
-
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support

 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).
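As a hedged illustration of driving this admin API from Python (the endpoint path and body shape follow the Synapse admin API docs, but treat them as assumptions here; the server URL, token, and user ID are placeholders):

```python
import requests

BASE = "https://homeserver.example.com"  # placeholder: your server's URL
TOKEN = "<admin access_token>"           # placeholder
USER_ID = "@alice:example.com"           # placeholder

# Enable MSC3881 (remote push notification toggling) for one user.
resp = requests.put(
    f"{BASE}/_synapse/admin/v1/experimental_features/{USER_ID}",
    json={"features": {"msc3881": True}},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
```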
@@ -36,6 +36,10 @@ The following query parameters are available:
   - the room's name,
   - the local part of the room's canonical alias, or
   - the complete (local and server part) room's id (case sensitive).
+* `public_rooms` - Optional flag to filter public rooms. If `true`, only public rooms are queried. If `false`, public rooms are excluded from
+  the query. When the flag is absent (the default), **both** public and non-public rooms are included in the search results.
+* `empty_rooms` - Optional flag to filter empty rooms. A room is empty if joined_members is zero. If `true`, only empty rooms are queried. If `false`, empty rooms are excluded from
+  the query. When the flag is absent (the default), **both** empty and non-empty rooms are included in the search results.

 Defaults to no filtering.
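For illustration, a small Python sketch that queries the List Room admin API with these new flags (the `/_synapse/admin/v1/rooms` path and response shape follow the admin API docs; the server URL and token are placeholders):

```python
import requests

BASE = "https://homeserver.example.com"  # placeholder: your server's URL
TOKEN = "<admin access_token>"           # placeholder

# Ask only for public, non-empty rooms.
resp = requests.get(
    f"{BASE}/_synapse/admin/v1/rooms",
    params={"public_rooms": "true", "empty_rooms": "false"},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
for room in resp.json().get("rooms", []):
    print(room["room_id"], room.get("joined_members"))
```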
@@ -8,9 +8,7 @@ errors in code.

 The necessary tools are:

-- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
-- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
-- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
+- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors and enforce a consistent style; and
 - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.

 See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.

-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.

 ### How do I know what to call the changelog file before I create the PR?
@@ -21,8 +21,10 @@ incrementing integer, but backfilled events start with `stream_ordering=-1` and

 ---

-- `/sync` returns things in the order they arrive at the server (`stream_ordering`).
-- `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`.
+- Incremental `/sync?since=xxx` returns things in the order they arrive at the server
+  (`stream_ordering`).
+- Initial `/sync`, `/messages` (and `/backfill` in the federation API) return them in
+  the order determined by the event graph `(topological_ordering, stream_ordering)`.
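To make the distinction concrete, here is a toy Python illustration of the two orderings described in the bullets above; the events themselves are invented for the example.

```python
events = [
    {"event_id": "$a", "stream_ordering": 3, "topological_ordering": 1},
    {"event_id": "$b", "stream_ordering": 1, "topological_ordering": 2},
    {"event_id": "$c", "stream_ordering": 2, "topological_ordering": 3},
]

# Incremental /sync order: arrival at this server.
sync_order = sorted(events, key=lambda e: e["stream_ordering"])  # $b, $c, $a

# Initial /sync and /messages order: the event graph.
topo_order = sorted(
    events, key=lambda e: (e["topological_ordering"], e["stream_ordering"])
)  # $a, $b, $c
```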

 The general idea is that, if you're following a room in real-time (i.e.
 `/sync`), you probably want to see the messages as they arrive at your server,
docs/element_logo_white_bg.svg (new file, 94 lines, 7.5 KiB)
@@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="41.440346mm"
   height="10.383124mm"
   viewBox="0 0 41.440346 10.383125"
   version="1.1"
   id="svg1"
   xml:space="preserve"
   sodipodi:docname="element_logo_white_bg.svg"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
     id="namedview1"
     pagecolor="#ffffff"
     bordercolor="#000000"
     borderopacity="0.25"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:document-units="mm"
     showgrid="false"
     inkscape:export-bgcolor="#ffffffff" /><defs
     id="defs1" /><g
     id="layer1"
     transform="translate(-84.803844,-143.2075)"
     inkscape:export-filename="element_logo_white_bg.svg"
     inkscape:export-xdpi="96"
     inkscape:export-ydpi="96"><g
       style="fill:none"
       id="g1"
       transform="matrix(0.26458333,0,0,0.26458333,85.841658,144.26667)"><rect
         style="display:inline;fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:1.31041;stroke-dasharray:none;stroke-opacity:1"
         id="rect20"
         width="155.31451"
         height="37.932892"
         x="-3.2672384"
         y="-3.3479743"
         rx="3.3718522"
         ry="3.7915266"
         transform="translate(-2.1259843e-6)"
         inkscape:label="rect20"
         inkscape:export-filename="rect20.svg"
         inkscape:export-xdpi="96"
         inkscape:export-ydpi="96" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="M 16,32 C 24.8366,32 32,24.8366 32,16 32,7.16344 24.8366,0 16,0 7.16344,0 0,7.16344 0,16 0,24.8366 7.16344,32 16,32 Z"
         fill="#0dbd8b"
         id="path1" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 13.0756,7.455 c 0,-0.64584 0.5247,-1.1694 1.1719,-1.1694 4.3864,0 7.9423,3.54853 7.9423,7.9259 0,0.6458 -0.5246,1.1694 -1.1718,1.1694 -0.6472,0 -1.1719,-0.5236 -1.1719,-1.1694 0,-3.0857 -2.5066,-5.58711 -5.5986,-5.58711 -0.6472,0 -1.1719,-0.52355 -1.1719,-1.16939 z"
         fill="#ffffff"
         id="path2" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 24.5424,13.042 c 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,4.3773 -3.5559,7.9258 -7.9424,7.9258 -0.6472,0 -1.1718,-0.5235 -1.1718,-1.1693 0,-0.6459 0.5246,-1.1694 1.1718,-1.1694 3.0921,0 5.5987,-2.5015 5.5987,-5.5871 0,-0.6459 0.5247,-1.1694 1.1718,-1.1694 z"
         fill="#ffffff"
         id="path3" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 18.9446,24.5446 c 0,0.6459 -0.5247,1.1694 -1.1718,1.1694 -4.3865,0 -7.94239,-3.5485 -7.94239,-7.9258 0,-0.6459 0.52469,-1.1694 1.17179,-1.1694 0.6472,0 1.1719,0.5235 1.1719,1.1694 0,3.0856 2.5066,5.587 5.5987,5.587 0.6471,0 1.1718,0.5236 1.1718,1.1694 z"
         fill="#ffffff"
         id="path4" /><path
         fill-rule="evenodd"
         clip-rule="evenodd"
         d="m 7.45823,18.9576 c -0.64718,0 -1.17183,-0.5235 -1.17183,-1.1694 0,-4.3773 3.55591,-7.92581 7.9423,-7.92581 0.6472,0 1.1719,0.52351 1.1719,1.16941 0,0.6458 -0.5247,1.1694 -1.1719,1.1694 -3.092,0 -5.59864,2.5014 -5.59864,5.587 0,0.6459 -0.52465,1.1694 -1.17183,1.1694 z"
         fill="#ffffff"
         id="path5" /><path
         d="M 56.2856,18.1428 H 44.9998 c 0.1334,1.181 0.5619,2.1238 1.2858,2.8286 0.7238,0.6857 1.6761,1.0286 2.8571,1.0286 0.7809,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3428,-1.5428 h 3.4286 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1714,0 3.9238,0.73333 5.2572,2.20003 1.3523,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0667,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6858,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
         fill="#000000"
         id="path6" /><path
         d="M 58.6539,20.1428 V 3.14282 h 3.4 V 20.2 c 0,0.7619 0.419,1.1428 1.2571,1.1428 l 0.6,-0.0285 v 3.2285 c -0.3238,0.0572 -0.6667,0.0857 -1.0286,0.0857 -1.4666,0 -2.5428,-0.3714 -3.2285,-1.1142 -0.6667,-0.7429 -1,-1.8667 -1,-3.3715 z"
         fill="#000000"
         id="path7" /><path
         d="M 79.7454,18.1428 H 68.4597 c 0.1333,1.181 0.5619,2.1238 1.2857,2.8286 0.7238,0.6857 1.6762,1.0286 2.8571,1.0286 0.781,0 1.4857,-0.1905 2.1143,-0.5715 0.6286,-0.3809 1.0762,-0.8952 1.3429,-1.5428 h 3.4285 c -0.4571,1.5047 -1.3143,2.7238 -2.5714,3.6571 -1.2381,0.9143 -2.7048,1.3715 -4.4,1.3715 -2.2095,0 -4,-0.7334 -5.3714,-2.2 -1.3524,-1.4667 -2.0286,-3.3239 -2.0286,-5.5715 0,-2.1905 0.6857,-4.0285 2.0571,-5.5143 1.3715,-1.4857 3.1429,-2.22853 5.3143,-2.22853 2.1715,0 3.9238,0.73333 5.2572,2.20003 1.3524,1.4476 2.0285,3.2762 2.0285,5.4857 z m -7.2572,-5.9714 c -1.0666,0 -1.9524,0.3143 -2.6571,0.9429 -0.7048,0.6285 -1.1429,1.4666 -1.3143,2.5142 h 7.8857 c -0.1524,-1.0476 -0.5714,-1.8857 -1.2571,-2.5142 -0.6857,-0.6286 -1.5715,-0.9429 -2.6572,-0.9429 z"
         fill="#000000"
         id="path8" /><path
         d="m 95.0851,16.0571 v 8.5143 h -3.4 v -8.8857 c 0,-2.2476 -0.9333,-3.3714 -2.8,-3.3714 -1.0095,0 -1.819,0.3238 -2.4286,0.9714 -0.5904,0.6476 -0.8857,1.5333 -0.8857,2.6571 v 8.6286 h -3.4 V 9.74282 h 3.1429 v 1.97148 c 0.3619,-0.6667 0.9143,-1.2191 1.6571,-1.6572 0.7429,-0.43809 1.6667,-0.65713 2.7714,-0.65713 2.0572,0 3.5429,0.78093 4.4572,2.34283 1.2571,-1.5619 2.9333,-2.34283 5.0286,-2.34283 1.733,0 3.067,0.54285 4,1.62853 0.933,1.0667 1.4,2.4762 1.4,4.2286 v 9.3143 h -3.4 v -8.8857 c 0,-2.2476 -0.933,-3.3714 -2.8,-3.3714 -1.0286,0 -1.8477,0.3333 -2.4572,1 -0.5905,0.6476 -0.8857,1.5619 -0.8857,2.7428 z"
         fill="#000000"
         id="path9" /><path
         d="m 121.537,18.1428 h -11.286 c 0.133,1.181 0.562,2.1238 1.286,2.8286 0.723,0.6857 1.676,1.0286 2.857,1.0286 0.781,0 1.486,-0.1905 2.114,-0.5715 0.629,-0.3809 1.076,-0.8952 1.343,-1.5428 h 3.429 c -0.458,1.5047 -1.315,2.7238 -2.572,3.6571 -1.238,0.9143 -2.705,1.3715 -4.4,1.3715 -2.209,0 -4,-0.7334 -5.371,-2.2 -1.353,-1.4667 -2.029,-3.3239 -2.029,-5.5715 0,-2.1905 0.686,-4.0285 2.057,-5.5143 1.372,-1.4857 3.143,-2.22853 5.315,-2.22853 2.171,0 3.923,0.73333 5.257,2.20003 1.352,1.4476 2.028,3.2762 2.028,5.4857 z m -7.257,-5.9714 c -1.067,0 -1.953,0.3143 -2.658,0.9429 -0.704,0.6285 -1.142,1.4666 -1.314,2.5142 h 7.886 c -0.153,-1.0476 -0.572,-1.8857 -1.257,-2.5142 -0.686,-0.6286 -1.572,-0.9429 -2.657,-0.9429 z"
         fill="#000000"
         id="path10" /><path
         d="m 127.105,9.74282 v 1.97148 c 0.343,-0.6477 0.905,-1.1905 1.686,-1.6286 0.8,-0.45716 1.762,-0.68573 2.885,-0.68573 1.753,0 3.105,0.53333 4.058,1.60003 0.971,1.0666 1.457,2.4857 1.457,4.2571 v 9.3143 h -3.4 v -8.8857 c 0,-1.0476 -0.248,-1.8667 -0.743,-2.4572 -0.476,-0.6095 -1.21,-0.9142 -2.2,-0.9142 -1.086,0 -1.943,0.3238 -2.572,0.9714 -0.609,0.6476 -0.914,1.5428 -0.914,2.6857 v 8.6 h -3.4 V 9.74282 Z"
         fill="#000000"
         id="path11" /><path
         d="m 147.12,21.5428 v 2.9429 c -0.419,0.1143 -1.009,0.1714 -1.771,0.1714 -2.895,0 -4.343,-1.4571 -4.343,-4.3714 v -7.8286 h -2.257 V 9.74282 h 2.257 V 5.88568 h 3.4 v 3.85714 h 2.772 v 2.71428 h -2.772 v 7.4857 c 0,1.1619 0.552,1.7429 1.657,1.7429 z"
         fill="#000000"
         id="path12" /></g></g></svg>
@@ -242,12 +242,11 @@ host all all ::1/128 ident

 ### Fixing incorrect `COLLATE` or `CTYPE`

-Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set. Synapse will also refuse to start an existing database with incorrect values
-of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
-`database` section of the config, is set to true. Using different locales can cause issues if the locale library is updated from
-underneath the database, or if a different version of the locale is used on any
-replicas.
+Synapse will refuse to start when using a database with incorrect values of
+`COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in the
+`database` section of the config, is set to true. Using different locales can
+cause issues if the locale library is updated from underneath the database, or
+if a different version of the locale is used on any replicas.

 If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
 the correct locale parameter (as shown above). It is also possible to change the
@@ -256,13 +255,3 @@ however extreme care must be taken to avoid database corruption.

 Note that the above may fail with an error about duplicate rows if corruption
 has already occurred, and such duplicate rows will need to be manually removed.
-
-### Fixing inconsistent sequences error
-
-Synapse uses Postgres sequences to generate IDs for various tables. A sequence
-and associated table can get out of sync if, for example, Synapse has been
-downgraded and then upgraded again.
-
-To fix the issue shut down Synapse (including any and all workers) and run the
-SQL command included in the error message. Once done Synapse should start
-successfully.
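As a hedged convenience related to the `COLLATE`/`CTYPE` discussion above, the database's current values can be checked from Python with psycopg2 before attempting any fix. The connection details and database name are placeholders; Synapse expects `C` for both values.

```python
import psycopg2

# Placeholder connection details: adjust for your deployment.
conn = psycopg2.connect(dbname="synapse", user="synapse_user", host="localhost")
with conn.cursor() as cur:
    cur.execute(
        "SELECT datcollate, datctype FROM pg_database WHERE datname = %s",
        ("synapse",),
    )
    collate, ctype = cur.fetchone()
print(f"COLLATE={collate} CTYPE={ctype}")  # Synapse expects 'C' for both
```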
@@ -67,7 +67,7 @@ in Synapse can be deactivated.

 **NOTE**: This has an impact on security and is for testing purposes only!

 To deactivate the certificate validation, the following setting must be added to
-your [homserver.yaml](../usage/configuration/homeserver_sample_config.md).
+your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).

 ```yaml
 use_insecure_ssl_client_just_for_testing_do_not_use: true
 ```
@@ -312,6 +312,61 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
 sudo dnf group install "Development Tools"
 ```

+##### Red Hat Enterprise Linux / Rocky Linux
+
+*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.*
+
+It's recommended to use the latest Python versions.
+
+RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them.
+
+Python 3.11 and 3.12 are available for both RHEL 8 and 9.
+
+These commands should be run as root user.
+
+RHEL 8
+```bash
+# Enable PowerTools repository
+dnf config-manager --set-enabled powertools
+```
+RHEL 9
+```bash
+# Enable CodeReady Linux Builder repository
+crb enable
+```
+
+Install a new version of Python. You only need one of these:
+```bash
+# Python 3.11
+dnf install python3.11 python3.11-devel
+```
+```bash
+# Python 3.12
+dnf install python3.12 python3.12-devel
+```
+Finally, install common prerequisites:
+```bash
+dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf
+dnf group install "Development Tools"
+```
+###### Using venv module instead of virtualenv command
+
+It's recommended to use the Python venv module directly rather than the virtualenv command.
+* On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/).
+* On RHEL 8, virtualenv is based on Python 3.6. It does not support creating 3.11/3.12 virtual environments.
+
+Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI.
+
+```bash
+mkdir -p ~/synapse
+# To use Python 3.11, simply use the command "python3.11" instead.
+python3.12 -m venv ~/synapse/env
+source ~/synapse/env/bin/activate
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install matrix-synapse
+```
+
 ##### macOS

 Installing prerequisites on macOS:
@@ -117,6 +117,20 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).

+# Upgrading to v1.111.0
+
+## New worker endpoints for authenticated client and federation media
+
+[Media repository workers](./workers.md#synapseappmedia_repository) handling
+Media APIs can now handle the following endpoint patterns:
+
+```
+^/_matrix/client/v1/media/.*$
+^/_matrix/federation/v1/media/.*$
+```
+
+Please update your reverse proxy configuration.
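As a quick sanity check when updating proxy rules, the patterns above can be exercised with Python's `re` module; the sample request paths are illustrative only.

```python
import re

MEDIA_WORKER_PATTERNS = [
    r"^/_matrix/client/v1/media/.*$",
    r"^/_matrix/federation/v1/media/.*$",
]

def routes_to_media_worker(path: str) -> bool:
    # True if the request path should be routed to a media repository worker.
    return any(re.match(p, path) for p in MEDIA_WORKER_PATTERNS)

assert routes_to_media_worker("/_matrix/client/v1/media/download/example.org/abc")
assert not routes_to_media_worker("/_matrix/client/v3/sync")
```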
+
 # Upgrading to v1.106.0

 ## Minimum supported Rust version
@@ -246,6 +246,7 @@ Example configuration:
```yaml
presence:
  enabled: false
  include_offline_users_on_sync: false
```

`enabled` can also be set to a special value of "untracked" which ignores updates
@@ -254,6 +255,10 @@ received via clients and federation, while still accepting updates from the

*The "untracked" option was added in Synapse 1.96.0.*

When clients perform an initial or `full_state` sync, presence results for offline users are
not included by default. Setting `include_offline_users_on_sync` to `true` will always include
offline users in the results. Defaults to false.
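
For example, the special "untracked" value described above would be set like this (illustrative):
```yaml
presence:
  enabled: untracked
```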

---
### `require_auth_for_profile_requests`

@@ -1759,8 +1764,9 @@ rc_3pid_validation:
### `rc_invites`

This option sets ratelimiting on how often invites can be sent in a room or to a
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`,
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer`
defaults to `per_second: 0.3`, `burst_count: 10`.
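
Written out as a configuration block, those defaults correspond to the following (illustrative only, to make the three sub-options concrete):
```yaml
rc_invites:
  per_room:
    per_second: 0.3
    burst_count: 10
  per_user:
    per_second: 0.003
    burst_count: 5
  per_issuer:
    per_second: 0.3
    burst_count: 10
```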

Client requests that invite user(s) when [creating a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
@@ -1862,6 +1868,18 @@ federation_rr_transactions_per_room_per_second: 40
## Media Store
Config options related to Synapse's media store.

---
### `enable_authenticated_media`

When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over the legacy
unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`); requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note that if the setting is switched to false
after enabling, media marked as authenticated will be available over the legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
Example configuration:
```yaml
enable_authenticated_media: true
```
---
### `enable_media_repo`

@@ -1946,6 +1964,24 @@ Example configuration:
max_image_pixels: 35M
```
---
### `remote_media_download_burst_count`

Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester. This configuration option sets the size of the bucket against which the size in bytes of downloads is penalized: if the bucket is full, i.e. a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second`, which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests.

Example configuration:
```yaml
remote_media_download_burst_count: 200M
```
---
### `remote_media_download_per_second`

Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads: this configuration option determines the rate at which the "bucket" (see above) leaks, in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached its capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`.

Example configuration:
```yaml
remote_media_download_per_second: 40K
```
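
To make the interaction between these two options concrete, here is a minimal Python sketch of the leaky-bucket behaviour described above. This is not Synapse's implementation; the class and names are invented for illustration only:
```python
import time


class LeakyBucket:
    """Illustrative leaky bucket: `capacity` plays the role of
    `remote_media_download_burst_count` (bytes) and `leak_rate` the role of
    `remote_media_download_per_second` (bytes per second)."""

    def __init__(self, capacity: float, leak_rate: float) -> None:
        self.capacity = capacity    # bucket size, in bytes
        self.leak_rate = leak_rate  # drain rate, in bytes per second
        self.level = 0.0            # bytes currently "in" the bucket
        self.last = time.monotonic()

    def allow(self, nbytes: int) -> bool:
        # Drain the bucket according to how much time has passed.
        now = time.monotonic()
        self.level = max(0.0, self.level - (now - self.last) * self.leak_rate)
        self.last = now
        # Deny the download if it would overfill the bucket.
        if self.level + nbytes > self.capacity:
            return False
        self.level += nbytes
        return True


# One bucket per requesting IP; the values mirror the defaults above.
bucket = LeakyBucket(capacity=500 * 1024 * 1024, leak_rate=87 * 1024)
print(bucket.allow(10 * 1024 * 1024))  # True while the bucket has room
```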
---
### `prevent_media_downloads_from`

A list of domains to never download media from. Media from these
@@ -1957,9 +1993,10 @@ This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.

This will have no effect on media originating from the local server.
This only affects media downloaded from other Matrix servers, to
block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
This will have no effect on media originating from the local server. This only
affects media downloaded from other Matrix servers; to control URL previews see
[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
[`url_preview_url_blacklist`](#url_preview_url_blacklist).

Defaults to an empty list (nothing blocked).
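
An illustrative configuration (the domains are placeholders):
```yaml
prevent_media_downloads_from:
  - evil.example.org
  - evil2.example.org
```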
@@ -2111,12 +2148,14 @@ url_preview_ip_range_whitelist:
---
### `url_preview_url_blacklist`

Optional list of URL matches that the URL preview spider is
denied from accessing. You should use `url_preview_ip_range_blacklist`
in preference to this, otherwise someone could define a public DNS
entry that points to a private IP address and circumvent the blacklist.
This is more useful if you know there is an entire shape of URL that
you know that will never want synapse to try to spider.
Optional list of URL matches that the URL preview spider is denied from
accessing. This is a usability feature, not a security one. You should use
`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
define a public DNS entry that points to a private IP address and circumvent
the blacklist. Applications that perform redirects or serve different content
when detecting that Synapse is accessing them can also bypass the blacklist.
This is more useful if you know there is an entire shape of URL that you
do not want Synapse to preview.

Each list entry is a dictionary of url component attributes as returned
by urlparse.urlsplit as applied to the absolute form of the URL. See
@@ -2347,7 +2386,7 @@ enable_registration_without_verification: true
---
### `registrations_require_3pid`

If this is set, users must provide all of the specified types of 3PID when registering an account.
If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account.

Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
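
An illustrative configuration requiring both an email address and a phone number at registration (adjust the list to the 3PID types you actually require):
```yaml
registrations_require_3pid:
  - email
  - msisdn
```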

@@ -2372,6 +2411,9 @@ disable_msisdn_registration: true

Mandate that users are only allowed to associate certain formats of
3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re).

More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types).

Example configuration:
```yaml
@@ -2381,7 +2423,7 @@ allowed_local_3pids:
  - medium: email
    pattern: '^[^@]+@vector\.im$'
  - medium: msisdn
    pattern: '\+44'
    pattern: '^44\d{10}$'
```
---
### `enable_3pid_lookup`
@@ -2700,7 +2742,7 @@ Example configuration:
session_lifetime: 24h
```
---
### `refresh_access_token_lifetime`
### `refreshable_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.
@@ -3788,7 +3830,8 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
  empty responses are returned to all queries. Defaults to true.
* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed.
  If set to true, will return all users known to the homeserver matching the search query.
  If false, search results will only contain users
  visible in public rooms and users sharing a room with the requester.
  Defaults to false (see the example after this list).
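
As a concrete illustration of these sub-options (the values are examples, not recommendations):
```yaml
user_directory:
  enabled: true
  search_all_users: true
```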
@@ -4111,6 +4154,38 @@ default_power_level_content_override:
  trusted_private_chat: null
  public_chat: null
```

The default power levels for each preset are:
```yaml
"m.room.name": 50
"m.room.power_levels": 100
"m.room.history_visibility": 100
"m.room.canonical_alias": 50
"m.room.avatar": 50
"m.room.tombstone": 100
"m.room.server_acl": 100
"m.room.encryption": 100
```

So a complete example, where the default power levels for a preset are maintained
but the power level for a new key is set, is:
```yaml
default_power_level_content_override:
  private_chat:
    events:
      "com.example.foo": 0
      "m.room.name": 50
      "m.room.power_levels": 100
      "m.room.history_visibility": 100
      "m.room.canonical_alias": 50
      "m.room.avatar": 50
      "m.room.tombstone": 100
      "m.room.server_acl": 100
      "m.room.encryption": 100
  trusted_private_chat: null
  public_chat: null
```

---
### `forget_rooms_on_leave`

@@ -4132,7 +4207,7 @@ By default, no room is excluded.
Example configuration:
```yaml
exclude_rooms_from_sync:
  - !foo:example.com
  - "!foo:example.com"
```

---
@@ -4610,7 +4685,9 @@ This setting has the following sub-options:
* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only
  for direct messages. Defaults to false.
* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false.
* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name".
* `worker_to_run_on`: Which worker to run this module on. This must match
  the "worker_name". If not set or `null`, invites will be accepted on the
  main process (see the example after this list).
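
Putting the sub-options together, an illustrative configuration might look like this (the worker name is a placeholder):
```yaml
auto_accept_invites:
  enabled: true
  only_for_direct_messages: true
  only_from_local_users: true
  worker_to_run_on: "worker_1"
```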

NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed.
The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join
@@ -62,6 +62,6 @@ following documentation:

## Reporting a security vulnerability

If you've found a security issue in Synapse or any other Matrix.org Foundation
project, please report it to us in accordance with our [Security Disclosure
Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
If you've found a security issue in Synapse or any other Element project,
please report it to us in accordance with our [Security Disclosure
Policy](https://element.io/security/security-disclosure-policy). Thank you!
@@ -739,6 +739,8 @@ An example for a federation sender instance:
Handles the media repository. It can handle all endpoints starting with:

    /_matrix/media/
    /_matrix/client/v1/media/
    /_matrix/federation/v1/media/

... and the following regular expressions matching media-specific administration APIs:
mypy.ini
@@ -96,3 +96,6 @@ ignore_missing_imports = True
# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True

[mypy-multipart.*]
ignore_missing_imports = True
poetry.lock (generated): file diff suppressed because it is too large
pylint.cfg
@@ -1,280 +0,0 @@
[MASTER]

# Specify a configuration file.
#rcfile=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Profiled execution.
profile=no

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS

# Pickle collected data for later comparisons.
persistent=yes

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=


[MESSAGES CONTROL]

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.
#enable=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=missing-docstring


[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text

# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no

# Tells whether to display a full report or only the messages
reports=yes

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Add a comment according to your evaluation note. This is used by the global
# evaluation report (RP0004).
comment=no

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=


[TYPECHECK]

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject

# When zope mode is activated, add a predefined set of Zope acquired attributes
# to generated-members.
zope=no

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[SIMILARITIES]

# Minimum lines number of a similarity.
min-similarity-lines=4

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no


[VARIABLES]

# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_$|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=


[BASIC]

# Required attributes for module, separated by a comma
required-attributes=

# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input

# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match correct attribute names in class
# bodies
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=__.*__

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=80

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

# List of optional constructs for which whitespace checking is disabled
no-space-check=trailing-comma,dict-separator

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[DESIGN]

# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branches=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[IMPORTS]

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[CLASSES]

# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
@@ -34,15 +34,11 @@
name = "Internal Changes"
showcontent = true

[tool.black]
target-version = ['py38', 'py39', 'py310', 'py311']
# black ignores everything in .gitignore by default, see
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
# Use `extend-exclude` if you want to exclude something in addition to this.

[tool.ruff]
line-length = 88
target-version = "py38"

[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
# for error codes. The ones we ignore are:
# E501: Line too long (black enforces this for us)
@@ -62,6 +58,8 @@ select = [
    "W",
    # pyflakes
    "F",
    # isort
    "I001",
    # flake8-bugbear
    "B0",
    # flake8-comprehensions
@@ -78,17 +76,20 @@ select = [
    "EXE",
]

[tool.isort]
line_length = 88
sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"]
default_section = "THIRDPARTY"
known_first_party = ["synapse"]
known_tests = ["tests"]
known_twisted = ["twisted", "OpenSSL"]
multi_line_output = 3
include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true
[tool.ruff.lint.isort]
combine-as-imports = true
section-order = ["future", "standard-library", "third-party", "twisted", "first-party", "testing", "local-folder"]
known-first-party = ["synapse"]

[tool.ruff.lint.isort.sections]
twisted = ["twisted", "OpenSSL"]
testing = ["tests"]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"

[tool.maturin]
manifest-path = "rust/Cargo.toml"
@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.108.0rc1"
version = "1.114.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -200,10 +201,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We use `assert_never`, which was added in `typing-extensions` 4.1.
typing-extensions = ">=4.1"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -226,6 +225,8 @@ pydantic = ">=1.7.4, <3"
# needed.
setuptools_rust = ">=1.3"

# This is used for parsing multipart responses
python-multipart = ">=0.0.9"

# Optional Dependencies
# ---------------------
@@ -319,9 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.3.7"
ruff = "0.6.2"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@@ -204,6 +204,8 @@ pub struct EventInternalMetadata {
    /// The stream ordering of this event. None, until it has been persisted.
    #[pyo3(get, set)]
    stream_ordering: Option<NonZeroI64>,
    #[pyo3(get, set)]
    instance_name: Option<String>,

    /// whether this event is an outlier (ie, whether we have the state at that
    /// point in the DAG)
@@ -232,6 +234,7 @@ impl EventInternalMetadata {
        Ok(EventInternalMetadata {
            data,
            stream_ordering: None,
            instance_name: None,
            outlier: false,
        })
    }
@@ -223,7 +223,6 @@ test_packages=(
    ./tests/msc3930
    ./tests/msc3902
    ./tests/msc3967
    ./tests/msc4115
)

# Enable dirty runs, so tests will reuse the same container where possible.
@@ -43,7 +43,7 @@ import argparse
import base64
import json
import sys
from typing import Any, Dict, Optional, Tuple
from typing import Any, Dict, Mapping, Optional, Tuple, Union
from urllib import parse as urlparse

import requests
@@ -75,7 +75,7 @@ def encode_canonical_json(value: object) -> bytes:
        value,
        # Encode code-points outside of ASCII as UTF-8 rather than \u escapes
        ensure_ascii=False,
        # Remove unecessary white space.
        # Remove unnecessary white space.
        separators=(",", ":"),
        # Sort the keys of dictionaries.
        sort_keys=True,
@@ -298,12 +298,23 @@ class MatrixConnectionAdapter(HTTPAdapter):

        return super().send(request, *args, **kwargs)

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    def get_connection_with_tls_context(
        self,
        request: PreparedRequest,
        verify: Optional[Union[bool, str]],
        proxies: Optional[Mapping[str, str]] = None,
        cert: Optional[Union[Tuple[str, str], str]] = None,
    ) -> HTTPConnectionPool:
        # overrides the get_connection() method in the base class
        parsed = urlparse.urlsplit(url)
        (host, port, ssl_server_name) = self._lookup(parsed.netloc)
        # overrides the get_connection_with_tls_context() method in the base class
        parsed = urlparse.urlsplit(request.url)

        # Extract the server name from the request URL, and ensure it's a str.
        hostname = parsed.netloc
        if isinstance(hostname, bytes):
            hostname = hostname.decode("utf-8")
        assert isinstance(hostname, str)

        (host, port, ssl_server_name) = self._lookup(hostname)
        print(
            f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
        )
@@ -1,8 +1,9 @@
#!/usr/bin/env bash
#
# Runs linting scripts over the local Synapse checkout
# black - opinionated code formatter
# ruff - lints and finds mistakes
# mypy - typechecks python code
# cargo clippy - lints rust code

set -e

@@ -101,18 +102,12 @@ echo
# Print out the commands being run
set -x

# Ensure the sort order of imports.
isort "${files[@]}"

# Ensure Python code conforms to an opinionated style.
python3 -m black "${files[@]}"

# Ensure the sample configuration file conforms to style checks.
./scripts-dev/config-lint.sh

# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
ruff --quiet --fix "${files[@]}"
ruff check --quiet --fix "${files[@]}"

# Catch any common programming mistakes in Rust code.
#
@@ -38,6 +38,7 @@ from mypy.types import (
    NoneType,
    TupleType,
    TypeAliasType,
    TypeVarType,
    UninhabitedType,
    UnionType,
)
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
    "synapse.synapse_rust.push.FilteredPushRules",
    # This is technically not immutable, but close enough.
    "signedjson.types.VerifyKey",
    "synapse.types.StrCollection",
}

# Immutable containers only if the values are also immutable.
@@ -298,7 +300,7 @@ def is_cacheable(

    elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
        # Mutable containers are mutable regardless of their underlying type.
        return False, None
        return False, f"container {rt.type.fullname} is mutable"

    elif "attrs" in rt.type.metadata:
        # attrs classes are only cachable iff it is frozen (immutable itself)
@@ -318,6 +320,9 @@ def is_cacheable(
        else:
            return False, "non-frozen attrs class"

    elif rt.type.is_enum:
        # We assume Enum values are immutable
        return True, None
    else:
        # Ensure we fail for unknown types, these generally means that the
        # above code is not complete.
@@ -326,6 +331,18 @@ def is_cacheable(
            f"Don't know how to handle {rt.type.fullname} return type instance",
        )

    elif isinstance(rt, TypeVarType):
        # We consider TypeVars immutable if they are bound to a set of immutable
        # types.
        if rt.values:
            for value in rt.values:
                ok, note = is_cacheable(value, signature, verbose)
                if not ok:
                    return False, f"TypeVar bound not cacheable {value}"
            return True, None

        return False, "TypeVar is unbound"

    elif isinstance(rt, NoneType):
        # None is cachable.
        return True, None
@@ -70,6 +70,7 @@ def cli() -> None:
        pip install -e .[dev]

    - A checkout of the sytest repository at ../sytest
    - A checkout of the complement repository at ../complement

    Then to use:

@@ -112,10 +113,12 @@ def _prepare() -> None:
    # Make sure we're in a git repo.
    synapse_repo = get_repo_and_check_clean_checkout()
    sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")

    click.secho("Updating Synapse and Sytest git repos...")
    synapse_repo.remote().fetch()
    sytest_repo.remote().fetch()
    complement_repo.remote().fetch()

    # Get the current version and AST from root Synapse module.
    current_version = get_package_version()
@@ -208,7 +211,15 @@ def _prepare() -> None:
        "Which branch should the release be based on?", default=default
    )

    for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
    for repo_name, repo in {
        "synapse": synapse_repo,
        "sytest": sytest_repo,
        "complement": complement_repo,
    }.items():
        # Special case for Complement: `develop` maps to `main`
        if repo_name == "complement" and branch_name == "develop":
            branch_name = "main"

        base_branch = find_ref(repo, branch_name)
        if not base_branch:
            print(f"Could not find base branch {branch_name} for {repo_name}!")
@@ -231,6 +242,12 @@ def _prepare() -> None:
    if click.confirm("Push new SyTest branch?", default=True):
        sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

    # Same for Complement
    if click.confirm("Push new Complement branch?", default=True):
        complement_repo.git.push(
            "-u", complement_repo.remote().name, release_branch_name
        )

    # Switch to the release branch and ensure it's up to date.
    synapse_repo.git.checkout(release_branch_name)
    update_branch(synapse_repo)
@@ -307,6 +324,11 @@ def tag(gh_token: Optional[str]) -> None:
def _tag(gh_token: Optional[str]) -> None:
    """Tags the release and generates a draft GitHub release"""

    if gh_token:
        # Test that the GH Token is valid before continuing.
        gh = Github(gh_token)
        gh.get_user()

    # Make sure we're in a git repo.
    repo = get_repo_and_check_clean_checkout()

@@ -401,6 +423,11 @@ def publish(gh_token: str) -> None:
def _publish(gh_token: str) -> None:
    """Publish release on GitHub."""

    if gh_token:
        # Test that the GH Token is valid before continuing.
        gh = Github(gh_token)
        gh.get_user()

    # Make sure we're in a git repo.
    get_repo_and_check_clean_checkout()

@@ -443,6 +470,11 @@ def upload(gh_token: Optional[str]) -> None:
def _upload(gh_token: Optional[str]) -> None:
    """Upload release to pypi."""

    if gh_token:
        # Test that the GH Token is valid before continuing.
        gh = Github(gh_token)
        gh.get_user()

    current_version = get_package_version()
    tag_name = f"v{current_version}"

@@ -538,6 +570,11 @@ def wait_for_actions(gh_token: Optional[str]) -> None:


def _wait_for_actions(gh_token: Optional[str]) -> None:
    if gh_token:
        # Test that the GH Token is valid before continuing.
        gh = Github(gh_token)
        gh.get_user()

    # Find out the version and tag name.
    current_version = get_package_version()
    tag_name = f"v{current_version}"
@@ -630,6 +667,9 @@ def _merge_back() -> None:
    else:
        # Full release
        sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
        complement_repo = get_repo_and_check_clean_checkout(
            "../complement", "complement"
        )

    if click.confirm(f"Merge {branch_name} → master?", default=True):
        _merge_into(synapse_repo, branch_name, "master")
@@ -643,6 +683,9 @@ def _merge_back() -> None:
    if click.confirm("On SyTest, merge master → develop?", default=True):
        _merge_into(sytest_repo, "master", "develop")

    if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
        _merge_into(complement_repo, branch_name, "main")


@cli.command()
def announce() -> None:
@@ -688,6 +731,11 @@ Ask the designated people to do the blog and tweets."""
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def full(gh_token: str) -> None:
    if gh_token:
        # Test that the GH Token is valid before continuing.
        gh = Github(gh_token)
        gh.get_user()

    click.echo("1. If this is a security release, read the security wiki page.")
    click.echo("2. Check for any release blockers before proceeding.")
    click.echo("   https://github.com/element-hq/synapse/labels/X-Release-Blocker")
@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore
    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
        super().__init__(config.server.server_name, config=config)
@@ -56,7 +56,9 @@ def main() -> None:
    password_pepper = password_config.get("pepper", password_pepper)
    password = args.password

    if not password:
    if not password and not sys.stdin.isatty():
        password = sys.stdin.readline().strip()
    elif not password:
        password = prompt_for_pass()

    # On Python 2, make sure we decode it to Unicode before we normalise it
@@ -52,6 +52,7 @@ def request_registration(
    user_type: Optional[str] = None,
    _print: Callable[[str], None] = print,
    exit: Callable[[int], None] = sys.exit,
    exists_ok: bool = False,
) -> None:
    url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

@@ -97,6 +98,10 @@ def request_registration(
    r = requests.post(url, json=data)

    if r.status_code != 200:
        response = r.json()
        if exists_ok and response["errcode"] == "M_USER_IN_USE":
            _print("User already exists. Skipping.")
            return
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
@@ -115,6 +120,7 @@ def register_new_user(
    shared_secret: str,
    admin: Optional[bool],
    user_type: Optional[str],
    exists_ok: bool = False,
) -> None:
    if not user:
        try:
@@ -154,7 +160,13 @@ def register_new_user(
        admin = False

    request_registration(
        user, password, server_location, shared_secret, bool(admin), user_type
        user,
        password,
        server_location,
        shared_secret,
        bool(admin),
        user_type,
        exists_ok=exists_ok,
    )


@@ -174,10 +186,22 @@ def main() -> None:
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "--exists-ok",
        action="store_true",
        help="Do not fail if user already exists.",
    )
    password_group = parser.add_mutually_exclusive_group()
    password_group.add_argument(
        "-p",
        "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
        help="New password for user. Will prompt for a password if "
        "this flag and `--password-file` are both omitted.",
    )
    password_group.add_argument(
        "--password-file",
        default=None,
        help="File containing the new password for user. If set, will override `--password`.",
    )
    parser.add_argument(
        "-t",
@@ -185,6 +209,7 @@ def main() -> None:
        default=None,
        help="User type as specified in synapse.api.constants.UserTypes",
    )

    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",
@@ -247,6 +272,11 @@ def main() -> None:
        print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
        sys.exit(1)

    if args.password_file:
        password = _read_file(args.password_file, "password-file").strip()
    else:
        password = args.password

    if args.server_url:
        server_url = args.server_url
    elif config is not None:
@@ -270,7 +300,13 @@ def main() -> None:
        admin = args.admin

    register_new_user(
        args.user, args.password, server_url, secret, admin, args.user_type
        args.user,
        password,
        server_url,
        secret,
        admin,
        args.user_type,
        exists_ok=args.exists_ok,
    )
@@ -119,18 +119,19 @@ BOOLEAN_COLUMNS = {
    "e2e_room_keys": ["is_verified"],
    "event_edges": ["is_state"],
    "events": ["processed", "outlier", "contains_url"],
    "local_media_repository": ["safe_from_quarantine"],
    "local_media_repository": ["safe_from_quarantine", "authenticated"],
    "per_user_experimental_features": ["enabled"],
    "presence_list": ["accepted"],
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
    "pushers": ["enabled"],
    "redactions": ["have_censored"],
    "remote_media_cache": ["authenticated"],
    "room_stats_state": ["is_federatable"],
    "rooms": ["is_public", "has_auth_chain_index"],
    "users": ["shadow_banned", "approved", "locked", "suspended"],
    "un_partial_stated_event_stream": ["rejection_status_changed"],
    "users_who_share_rooms": ["share_private"],
    "per_user_experimental_features": ["enabled"],
}
@@ -777,23 +778,75 @@ class Porter:
        await self._setup_events_stream_seqs()
        await self._setup_sequence(
            "un_partial_stated_event_stream_sequence",
            ("un_partial_stated_event_stream",),
            [("un_partial_stated_event_stream", "stream_id")],
        )
        await self._setup_sequence(
            "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
            "device_inbox_sequence",
            [
                ("device_inbox", "stream_id"),
                ("device_federation_outbox", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "account_data_sequence",
            ("room_account_data", "room_tags_revisions", "account_data"),
            [
                ("room_account_data", "stream_id"),
                ("room_tags_revisions", "stream_id"),
                ("account_data", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "receipts_sequence",
            [
                ("receipts_linearized", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "presence_stream_sequence",
            [
                ("presence_stream", "stream_id"),
            ],
        )
        await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
        await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
        await self._setup_auth_chain_sequence()
        await self._setup_sequence(
            "application_services_txn_id_seq",
            ("application_services_txns",),
            [
                (
                    "application_services_txns",
                    "txn_id",
                )
            ],
        )
        await self._setup_sequence(
            "device_lists_sequence",
            [
                ("device_lists_stream", "stream_id"),
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
                ("device_lists_changes_in_room", "stream_id"),
                ("device_lists_remote_pending", "stream_id"),
                ("device_lists_changes_converted_stream_position", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "e2e_cross_signing_keys_sequence",
            [
                ("e2e_cross_signing_keys", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "push_rules_stream_sequence",
            [
                ("push_rules_stream", "stream_id"),
            ],
        )
        await self._setup_sequence(
            "pushers_sequence",
            [
                ("pushers", "id"),
                ("deleted_pushers", "stream_id"),
            ],
        )

        # Step 3. Get tables.
        self.progress.set_state("Fetching tables")
@@ -1101,12 +1154,11 @@ class Porter:
    async def _setup_sequence(
        self,
        sequence_name: str,
        stream_id_tables: Iterable[str],
        column_name: str = "stream_id",
        stream_id_tables: Iterable[Tuple[str, str]],
    ) -> None:
        """Set a sequence to the correct value."""
        current_stream_ids = []
        for stream_id_table in stream_id_tables:
        for stream_id_table, column_name in stream_id_tables:
            max_stream_id = cast(
                int,
                await self.sqlite_store.db_pool.simple_select_one_onecol(
@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore [assignment]
    DATASTORE_CLASS = DataStore

    def __init__(self, config: HomeServerConfig):
        super().__init__(
@@ -18,7 +18,7 @@
# [This file includes modifications made by New Vector Limited]
#
#
from typing import Optional, Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from typing_extensions import Protocol

@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"

@@ -87,6 +90,19 @@ class Auth(Protocol):
            AuthError if access is denied for the user in the access token
        """

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        """Like `get_user_by_req`, except also checks if the user has access to
        the experimental feature. If they don't, returns a 404 unrecognized
        request.
        """

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ) -> None:
@@ -28,6 +28,7 @@ from synapse.api.errors import (
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
from .base import BaseAuth

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                parent_span.set_tag("appservice_id", requester.app_service.id)
        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
@@ -40,6 +40,7 @@ from synapse.api.errors import (
    OAuthInsufficientScopeError,
    StoreError,
    SynapseError,
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
@@ -48,6 +49,7 @@ from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

if TYPE_CHECKING:
    from synapse.rest.admin.experimental_features import ExperimentalFeature
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -119,7 +121,9 @@ class MSC3861DelegatedAuth(BaseAuth):
        self._hostname = hs.hostname
        self._admin_token = self._config.admin_token

        self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata)
        self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata](
            self._load_metadata
        )

        if isinstance(auth_method, PrivateKeyJWTWithKid):
            # Use the JWK as the client secret when using the private_key_jwt method
@@ -143,6 +147,45 @@ class MSC3861DelegatedAuth(BaseAuth):
        # metadata.validate_introspection_endpoint()
        return metadata

    async def issuer(self) -> str:
        """
        Get the configured issuer

        This will use the issuer value set in the metadata,
        falling back to the one set in the config if not set in the metadata
        """
        metadata = await self._issuer_metadata.get()
        return metadata.issuer or self._config.issuer

    async def account_management_url(self) -> Optional[str]:
        """
        Get the configured account management URL

        This will discover the account management URL from the issuer if it's not set in the config
        """
        if self._config.account_management_url is not None:
            return self._config.account_management_url

        try:
            metadata = await self._issuer_metadata.get()
            return metadata.get("account_management_uri", None)
        # We don't want to raise here if we can't load the metadata
        except Exception:
            logger.warning("Failed to load metadata:", exc_info=True)
            return None

    async def _introspection_endpoint(self) -> str:
        """
        Returns the introspection endpoint of the issuer

        It uses the config option if set, otherwise it will use OIDC discovery to get it
        """
        if self._config.introspection_endpoint is not None:
            return self._config.introspection_endpoint

        metadata = await self._issuer_metadata.get()
        return metadata.get("introspection_endpoint")

    async def _introspect_token(self, token: str) -> IntrospectionToken:
        """
        Send a token to the introspection endpoint and returns the introspection response
@@ -159,8 +202,7 @@ class MSC3861DelegatedAuth(BaseAuth):
        Returns:
            The introspection response
        """
        metadata = await self._issuer_metadata.get()
        introspection_endpoint = metadata.get("introspection_endpoint")
        introspection_endpoint = await self._introspection_endpoint()
        raw_headers: Dict[str, str] = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": str(self._http_client.user_agent, "utf-8"),
@@ -245,6 +287,32 @@ class MSC3861DelegatedAuth(BaseAuth):

        return requester

    async def get_user_by_req_experimental_feature(
        self,
        request: SynapseRequest,
        feature: "ExperimentalFeature",
        allow_guest: bool = False,
        allow_expired: bool = False,
        allow_locked: bool = False,
    ) -> Requester:
        try:
            requester = await self.get_user_by_req(
                request,
                allow_guest=allow_guest,
                allow_expired=allow_expired,
                allow_locked=allow_locked,
            )
            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
                return requester

            raise UnrecognizedRequestError(code=404)
        except (AuthError, InvalidClientTokenError):
            if feature.is_globally_enabled(self.hs.config):
                # If it's globally enabled then return the auth error
                raise

            raise UnrecognizedRequestError(code=404)

    async def get_user_by_access_token(
        self,
        token: str,
@@ -50,7 +50,7 @@ class Membership:
    KNOCK: Final = "knock"
    LEAVE: Final = "leave"
    BAN: Final = "ban"
    LIST: Final = (INVITE, JOIN, KNOCK, LEAVE, BAN)
    LIST: Final = frozenset((INVITE, JOIN, KNOCK, LEAVE, BAN))


class PresenceState:
@@ -128,9 +128,13 @@ class EventTypes:
    SpaceParent: Final = "m.space.parent"

    Reaction: Final = "m.reaction"
    Sticker: Final = "m.sticker"
    LiveLocationShareStart: Final = "m.beacon_info"

    CallInvite: Final = "m.call.invite"

    PollStart: Final = "m.poll.start"


class ToDeviceEventTypes:
    RoomKeyRequest: Final = "m.room_key_request"
@@ -221,6 +225,11 @@ class EventContentFields:
    # This is deprecated in MSC2175.
    ROOM_CREATOR: Final = "creator"

    # The version of the room for `m.room.create` events.
    ROOM_VERSION: Final = "room_version"

    ROOM_NAME: Final = "name"

    # Used in m.room.guest_access events.
    GUEST_ACCESS: Final = "guest_access"

@@ -233,12 +242,15 @@ class EventContentFields:
    # an unspecced field added to to-device messages to identify them uniquely-ish
    TO_DEVICE_MSGID: Final = "org.matrix.msgid"

    # `m.room.encryption` algorithm field
    ENCRYPTION_ALGORITHM: Final = "algorithm"


class EventUnsignedContentFields:
    """Fields found inside the 'unsigned' data on events"""

    # Requesting user's membership, per MSC4115
    MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
    MEMBERSHIP: Final = "membership"


class RoomTypes:
@@ -128,6 +128,10 @@ class Codes(str, Enum):
    # MSC2677
    DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"

    # MSC3575 we are telling the client they need to expire their sliding sync
    # connection.
    UNKNOWN_POS = "M_UNKNOWN_POS"


class CodeMessageException(RuntimeError):
    """An exception with integer code, a message string attributes and optional headers.
@@ -847,3 +851,17 @@ class PartialStateConflictError(SynapseError):
            msg=PartialStateConflictError.message(),
            errcode=Codes.UNKNOWN,
        )


class SlidingSyncUnknownPosition(SynapseError):
    """An error that Synapse can return to signal to the client to expire their
    sliding sync connection (i.e. send a new request without a `?since=`
    param).
    """

    def __init__(self) -> None:
        super().__init__(
            HTTPStatus.BAD_REQUEST,
            msg="Unknown position",
            errcode=Codes.UNKNOWN_POS,
        )
@@ -130,7 +130,8 @@ class Ratelimiter:
Overrides the value set during instantiation if set.
burst_count: How many actions that can be performed before being limited.
Overrides the value set during instantiation if set.
update: Whether to count this check as performing the action
update: Whether to count this check as performing the action. If the action
cannot be performed, the user's action count is not incremented at all.
n_actions: The number of times the user wants to do this action. If the user
cannot do all of the actions, the user's action count is not incremented
at all.
@@ -235,9 +236,8 @@ class Ratelimiter:
requester: The requester that is doing the action, if any.
key: An arbitrary key used to classify an action. Defaults to the
requester's user ID.
n_actions: The number of times the user wants to do this action. If the user
cannot do all of the actions, the user's action count is not incremented
at all.
n_actions: The number of times the user performed the action. May be negative
to "refund" the rate limit.
_time_now_s: The current time. Optional, defaults to the current time according
to self.clock. Only used by tests.
"""
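To make the two docstrings concrete: the first belongs to the pre-flight check, the second to the commit/refund path. A minimal sketch of how the documented parameters combine (method names follow Synapse's `Ratelimiter` as suggested by these docstrings; treat the exact signatures as indicative):

async def send_three_messages(limiter, requester, user_id):
    # Pre-flight: would 3 actions fit? update=False leaves the counter untouched.
    allowed, retry_at = await limiter.can_do_action(
        requester, key=user_id, n_actions=3, update=False
    )
    if not allowed:
        return retry_at
    # ... perform the work, then commit the 3 actions to the counter:
    await limiter.record_action(requester, key=user_id, n_actions=3)
    # A failed action could be refunded with a negative n_actions:
    # await limiter.record_action(requester, key=user_id, n_actions=-1)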
@@ -681,8 +681,8 @@ def setup_sentry(hs: "HomeServer") -> None:
)

# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
scope.set_tag("matrix_server_name", hs.config.server.server_name)
global_scope = sentry_sdk.Scope.get_global_scope()
global_scope.set_tag("matrix_server_name", hs.config.server.server_name)

app = (
hs.config.worker.worker_app
@@ -690,8 +690,8 @@ def setup_sentry(hs: "HomeServer") -> None:
else "synapse.app.homeserver"
)
name = hs.get_instance_name()
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
global_scope.set_tag("worker_app", app)
global_scope.set_tag("worker_name", name)


def setup_sdnotify(hs: "HomeServer") -> None:
@@ -110,7 +110,7 @@ class AdminCmdStore(


class AdminCmdServer(HomeServer):
DATASTORE_CLASS = AdminCmdStore  # type: ignore
DATASTORE_CLASS = AdminCmdStore


async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
@@ -74,6 +74,9 @@ from synapse.storage.databases.main.event_push_actions import (
EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.experimental_features import (
ExperimentalFeaturesStore,
)
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
@@ -95,6 +98,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.sliding_sync import SlidingSyncStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.stream import StreamWorkerStore
@@ -155,6 +159,8 @@ class GenericWorkerStore(
LockStore,
SessionStore,
TaskSchedulerWorkerStore,
ExperimentalFeaturesStore,
SlidingSyncStore,
):
# Properties that multiple storage classes define. Tell mypy what the
# expected type is.
@@ -163,7 +169,7 @@ class GenericWorkerStore(


class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerStore  # type: ignore
DATASTORE_CLASS = GenericWorkerStore

def _listen_http(self, listener_config: ListenerConfig) -> None:
assert listener_config.http_options is not None
@@ -202,6 +208,21 @@ class GenericWorkerServer(HomeServer):
"/_synapse/admin": admin_resource,
}
)

if "federation" not in res.names:
# Only load the federation media resource separately if federation
# resource is not specified since federation resource includes media
# resource.
resources[FEDERATION_PREFIX] = TransportLayerServer(
self, servlet_groups=["media"]
)
if "client" not in res.names:
# Only load the client media resource separately if client
# resource is not specified since client resource includes media
# resource.
resources[CLIENT_API_PREFIX] = ClientRestResource(
self, servlet_groups=["media"]
)
else:
logger.warning(
"A 'media' listener is configured but the media"
@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:


class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore  # type: ignore
DATASTORE_CLASS = DataStore

def _listener_http(
self,
@@ -101,6 +101,12 @@ class SynapseHomeServer(HomeServer):
# Skip loading openid resource if federation is defined
# since federation resource will include openid
continue
if name == "media" and (
"federation" in res.names or "client" in res.names
):
# Skip loading media resource if federation or client are defined
# since federation & client resources will include media
continue
if name == "health":
# Skip loading, health resource is always included
continue
@@ -217,7 +223,7 @@ class SynapseHomeServer(HomeServer):
)

if name in ["media", "federation", "client"]:
if self.config.server.enable_media_repo:
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{
@@ -231,6 +237,14 @@ class SynapseHomeServer(HomeServer):
"'media' resource conflicts with enable_media_repo=False"
)

if name == "media":
resources[FEDERATION_PREFIX] = TransportLayerServer(
self, servlet_groups=["media"]
)
resources[CLIENT_API_PREFIX] = ClientRestResource(
self, servlet_groups=["media"]
)

if name in ["keys", "federation"]:
resources[SERVER_KEY_PREFIX] = KeyResource(self)
@@ -140,6 +140,12 @@ class MSC3861:
("experimental", "msc3861", "client_auth_method"),
)

introspection_endpoint: Optional[str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)),
)
"""The URL of the introspection endpoint used to validate access tokens."""

account_management_url: Optional[str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)),
@@ -332,6 +338,9 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)

# MSC3575 (Sliding Sync API endpoints)
self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False)

# MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)

@@ -390,9 +399,6 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)

# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)

# MSC3861: Matrix architecture change to delegate authentication via OIDC
try:
self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -433,6 +439,12 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)

self.msc4115_membership_on_events = experimental.get(
"msc4115_membership_on_events", False
self.msc3823_account_suspension = experimental.get(
"msc3823_account_suspension", False
)

# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)

# MSC4156: Migrate server_name to via
self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False)
@@ -218,3 +218,13 @@ class RatelimitConfig(Config):
"rc_media_create",
defaults={"per_second": 10, "burst_count": 50},
)

self.remote_media_downloads = RatelimitSettings(
key="rc_remote_media_downloads",
per_second=self.parse_size(
config.get("remote_media_download_per_second", "87K")
),
burst_count=self.parse_size(
config.get("remote_media_download_burst_count", "500M")
),
)
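Unlike the other rate limits, this one is denominated in bytes rather than actions: `parse_size` turns the size strings into byte budgets. A rough stand-in for how those strings plausibly map to numbers (this is an assumption about `parse_size`, not its verified implementation):

def parse_size_sketch(value: str) -> int:
    # Assumed K/M suffix handling, multiplying by powers of 1024.
    suffixes = {"K": 1024, "M": 1024 ** 2}
    if value and value[-1].upper() in suffixes:
        return int(float(value[:-1]) * suffixes[value[-1].upper()])
    return int(value)

assert parse_size_sketch("87K") == 89088        # per-second byte budget
assert parse_size_sketch("500M") == 524288000   # burst budget in bytes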
@@ -126,7 +126,7 @@ class ContentRepositoryConfig(Config):
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
self.root.server.enable_media_repo is False
config.get("enable_media_repo", True) is False
and config.get("worker_app") != "synapse.app.media_repository"
):
self.can_load_media_repo = False
@@ -272,6 +272,10 @@ class ContentRepositoryConfig(Config):
remote_media_lifetime
)

self.enable_authenticated_media = config.get(
"enable_authenticated_media", False
)

def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store")
@@ -384,6 +384,11 @@ class ServerConfig(Config):
# Whether to internally track presence, requires that presence is enabled,
self.track_presence = self.presence_enabled and presence_enabled != "untracked"

# Determines if presence results for offline users are included on initial/full sync
self.presence_include_offline_users_on_sync = presence_config.get(
"include_offline_users_on_sync", False
)

# Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None
@@ -395,12 +400,6 @@ class ServerConfig(Config):
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))

# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)

# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(
@@ -589,7 +589,7 @@ class BaseV2KeyFetcher(KeyFetcher):
% (server_name,)
)

for key_id, key_data in response_json["old_verify_keys"].items():
for key_id, key_data in response_json.get("old_verify_keys", {}).items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
@@ -554,3 +554,22 @@ def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
aggregation_key = None

return _EventRelation(parent_id, rel_type, aggregation_key)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class StrippedStateEvent:
"""
A stripped down state event. Usually used for remote invite/knocks so the user can
make an informed decision on whether they want to join.

Attributes:
type: Event `type`
state_key: Event `state_key`
sender: Event `sender`
content: Event `content`
"""

type: str
state_key: str
sender: str
content: Dict[str, Any]
@@ -49,7 +49,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict, Requester

from . import EventBase, make_event_from_dict
from . import EventBase, StrippedStateEvent, make_event_from_dict

if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
@@ -90,6 +90,7 @@ def prune_event(event: EventBase) -> EventBase:
pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

# Mark the event as redacted
@@ -116,6 +117,7 @@ def clone_event(event: EventBase) -> EventBase:
new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
new_event.internal_metadata.outlier = event.internal_metadata.outlier

return new_event
@@ -834,3 +836,48 @@ def maybe_upsert_event_field(
del container[key]

return upsert_okay


def strip_event(event: EventBase) -> JsonDict:
"""
Used for "stripped state" events which provide a simplified view of the state of a
room intended to help a potential joiner identify the room (relevant when the user
is invited or knocked).

Stripped state events can only have the `sender`, `type`, `state_key` and `content`
properties present.
"""

return {
"type": event.type,
"state_key": event.state_key,
"content": event.content,
"sender": event.sender,
}


def parse_stripped_state_event(raw_stripped_event: Any) -> Optional[StrippedStateEvent]:
"""
Given a raw value from an event's `unsigned` field, attempt to parse it into a
`StrippedStateEvent`.
"""
if isinstance(raw_stripped_event, dict):
# All of these fields are required
type = raw_stripped_event.get("type")
state_key = raw_stripped_event.get("state_key")
sender = raw_stripped_event.get("sender")
content = raw_stripped_event.get("content")
if (
isinstance(type, str)
and isinstance(state_key, str)
and isinstance(sender, str)
and isinstance(content, dict)
):
return StrippedStateEvent(
type=type,
state_key=state_key,
sender=sender,
content=content,
)

return None
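To make the shape concrete, here is a round trip through the two helpers above with hypothetical values (assuming `parse_stripped_state_event` is importable as defined in the hunk; the user and room names are illustrative):

raw = {
    "type": "m.room.name",
    "state_key": "",
    "sender": "@alice:example.org",
    "content": {"name": "Project X"},
}
parsed = parse_stripped_state_event(raw)
assert parsed is not None and parsed.content["name"] == "Project X"
# Anything malformed (e.g. a missing or non-dict content) yields None
# rather than raising:
assert parse_stripped_state_event({"type": "m.room.name"}) is None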
@@ -47,9 +47,9 @@ from synapse.events.utils import (
validate_canonicaljson,
)
from synapse.http.servlet import validate_json_object
from synapse.rest.models import RequestBodyModel
from synapse.storage.controllers.state import server_acl_evaluator_from_event
from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID
from synapse.types.rest import RequestBodyModel


class EventValidator:
@@ -56,6 +56,7 @@ from synapse.api.errors import (
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
@@ -1870,6 +1871,52 @@ class FederationClient(FederationBase):

return filtered_statuses, filtered_failures

async def federation_download_media(
self,
destination: str,
media_id: str,
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Union[
Tuple[int, Dict[bytes, List[bytes]], bytes],
Tuple[int, Dict[bytes, List[bytes]]],
]:
try:
return await self.transport_layer.federation_download_media(
destination,
media_id,
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
# fall back to the _matrix/media/v3/download endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
raise

logger.debug(
"Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
destination,
media_id,
)

return await self.transport_layer.download_media_v3(
destination,
media_id,
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)

async def download_media(
self,
destination: str,
@@ -1877,6 +1924,8 @@ class FederationClient(FederationBase):
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
try:
return await self.transport_layer.download_media_v3(
@@ -1885,6 +1934,8 @@ class FederationClient(FederationBase):
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
@@ -1905,6 +1956,8 @@ class FederationClient(FederationBase):
output_stream=output_stream,
max_size=max_size,
max_timeout_ms=max_timeout_ms,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
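The fallback hinges on telling "the remote does not implement this endpoint" apart from a genuine failure. A rough approximation of that test (the function name here is hypothetical; Synapse's real `is_unknown_endpoint` helper inspects the `HttpResponseException`, and this sketch only captures the usual signals):

from typing import Optional

def _looks_like_unknown_endpoint(code: int, errcode: Optional[str] = None) -> bool:
    # 404/405 status codes, or an explicit M_UNRECOGNIZED errcode, suggest
    # the remote simply hasn't implemented the new endpoint yet.
    return code in (404, 405) or errcode == "M_UNRECOGNIZED"

assert _looks_like_unknown_endpoint(404)
assert not _looks_like_unknown_endpoint(500)  # a genuine error: re-raise instead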
@@ -674,7 +674,7 @@ class FederationServer(FederationBase):
# This is in addition to the HS-level rate limiting applied by
# BaseFederationServlet.
# type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(  # type: ignore[has-type]
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
requester=None,
key=room_id,
update=False,
@@ -717,7 +717,7 @@ class FederationServer(FederationBase):
SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
caller_supports_partial_state,
)
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(  # type: ignore[has-type]
await self._room_member_handler._join_rate_per_room_limiter.ratelimit(
requester=None,
key=room_id,
update=False,
@@ -21,6 +21,7 @@
#
import datetime
import logging
from collections import OrderedDict
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type

@@ -68,6 +69,10 @@ sent_edus_by_type = Counter(
# If the retry interval is larger than this then we enter "catchup" mode
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000

# Limit how many presence states we add to each presence EDU, to ensure that
# they are bounded in size.
MAX_PRESENCE_STATES_PER_EDU = 50


class PerDestinationQueue:
"""
@@ -144,7 +149,7 @@ class PerDestinationQueue:

# Map of user_id -> UserPresenceState of pending presence to be sent to this
# destination
self._pending_presence: Dict[str, UserPresenceState] = {}
self._pending_presence: OrderedDict[str, UserPresenceState] = OrderedDict()

# List of room_id -> receipt_type -> user_id -> receipt_dict,
#
@@ -333,12 +338,11 @@ class PerDestinationQueue:
# not caught up yet
return

pending_pdus = []
while True:
self._new_data_to_send = False

async with _TransactionQueueManager(self) as (
pending_pdus,
pending_pdus,  # noqa: F811
pending_edus,
):
if not pending_pdus and not pending_edus:
@@ -399,7 +403,7 @@ class PerDestinationQueue:
# through another mechanism, because this is all volatile!
self._pending_edus = []
self._pending_edus_keyed = {}
self._pending_presence = {}
self._pending_presence.clear()
self._pending_receipt_edus = []

self._start_catching_up()
@@ -721,22 +725,26 @@ class _TransactionQueueManager:

# Add presence EDU.
if self.queue._pending_presence:
# Only send max 50 presence entries in the EDU, to bound the amount
# of data we're sending.
presence_to_add: List[JsonDict] = []
while (
self.queue._pending_presence
and len(presence_to_add) < MAX_PRESENCE_STATES_PER_EDU
):
_, presence = self.queue._pending_presence.popitem(last=False)
presence_to_add.append(
format_user_presence_state(presence, self.queue._clock.time_msec())
)

pending_edus.append(
Edu(
origin=self.queue._server_name,
destination=self.queue._destination,
edu_type=EduTypes.PRESENCE,
content={
"push": [
format_user_presence_state(
presence, self.queue._clock.time_msec()
)
for presence in self.queue._pending_presence.values()
]
},
content={"push": presence_to_add},
)
)
self.queue._pending_presence = {}

# Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
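The switch to an `OrderedDict` is what makes the cap work: `popitem(last=False)` drains the oldest pending presence first, and anything beyond `MAX_PRESENCE_STATES_PER_EDU` simply waits for the next transaction. A standalone illustration of the drain pattern (toy data, not Synapse code):

from collections import OrderedDict

pending = OrderedDict((f"@u{i}:example.org", f"state{i}") for i in range(120))
batch = []
while pending and len(batch) < 50:
    user_id, state = pending.popitem(last=False)  # FIFO: oldest entry first
    batch.append((user_id, state))
# 50 oldest entries go into this EDU; the remaining 70 stay queued.
assert len(batch) == 50 and len(pending) == 70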
@@ -43,6 +43,7 @@ import ijson

from synapse.api.constants import Direction, Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
@@ -819,9 +820,10 @@ class TransportLayerClient:
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/r0/download/{destination}/{media_id}"

return await self.client.get_file(
destination,
path,
@@ -834,6 +836,8 @@ class TransportLayerClient:
"allow_remote": "false",
"timeout_ms": str(max_timeout_ms),
},
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)

async def download_media_v3(
@@ -843,9 +847,10 @@ class TransportLayerClient:
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
path = f"/_matrix/media/v3/download/{destination}/{media_id}"

return await self.client.get_file(
destination,
path,
@@ -862,6 +867,31 @@ class TransportLayerClient:
"allow_redirect": "true",
},
follow_redirects=True,
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)

async def federation_download_media(
self,
destination: str,
media_id: str,
output_stream: BinaryIO,
max_size: int,
max_timeout_ms: int,
download_ratelimiter: Ratelimiter,
ip_address: str,
) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
path = f"/_matrix/federation/v1/media/download/{media_id}"
return await self.client.federation_get_file(
destination,
path,
output_stream=output_stream,
max_size=max_size,
args={
"timeout_ms": str(max_timeout_ms),
},
download_ratelimiter=download_ratelimiter,
ip_address=ip_address,
)
@@ -32,6 +32,8 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationMediaDownloadServlet,
FederationMediaThumbnailServlet,
FederationUnstableClientKeysClaimServlet,
)
from synapse.http.server import HttpServer, JsonResource
@@ -269,6 +271,10 @@ SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
"federation": FEDERATION_SERVLET_CLASSES,
"room_list": (PublicRoomList,),
"openid": (OpenIdUserInfo,),
"media": (
FederationMediaDownloadServlet,
FederationMediaThumbnailServlet,
),
}


@@ -315,6 +321,13 @@ def register_servlets(
):
continue

if (
servletclass == FederationMediaDownloadServlet
or servletclass == FederationMediaThumbnailServlet
):
if not hs.config.media.can_load_media_repo:
continue

servletclass(
hs=hs,
authenticator=authenticator,
@@ -360,9 +360,29 @@ class BaseFederationServlet:
"request"
)
return None
if (
func.__self__.__class__.__name__  # type: ignore
== "FederationMediaDownloadServlet"
or func.__self__.__class__.__name__  # type: ignore
== "FederationMediaThumbnailServlet"
):
response = await func(
origin, content, request, *args, **kwargs
)
else:
response = await func(
origin, content, request.args, *args, **kwargs
)
else:
if (
func.__self__.__class__.__name__  # type: ignore
== "FederationMediaDownloadServlet"
or func.__self__.__class__.__name__  # type: ignore
== "FederationMediaThumbnailServlet"
):
response = await func(
origin, content, request, *args, **kwargs
)
else:
response = await func(
origin, content, request.args, *args, **kwargs
@@ -44,10 +44,15 @@ from synapse.federation.transport.server._base import (
)
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer,
parse_integer_from_args,
parse_string,
parse_string_from_args,
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
from synapse.media.thumbnailer import ThumbnailProvider
from synapse.types import JsonDict
from synapse.util import SYNAPSE_VERSION
from synapse.util.ratelimitutils import FederationRateLimiter
@@ -787,6 +792,95 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
return 200, {"account_statuses": statuses, "failures": failures}


class FederationMediaDownloadServlet(BaseFederationServerServlet):
"""
Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
a multipart/mixed response consisting of a JSON object and the requested media
item. This endpoint only returns local media.
"""

PATH = "/media/download/(?P<media_id>[^/]*)"
RATELIMIT = True

def __init__(
self,
hs: "HomeServer",
ratelimiter: FederationRateLimiter,
authenticator: Authenticator,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.media_repo = self.hs.get_media_repository()

async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
request: SynapseRequest,
media_id: str,
) -> None:
max_timeout_ms = parse_integer(
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
)
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
await self.media_repo.get_local_media(
request, media_id, None, max_timeout_ms, federation=True
)


class FederationMediaThumbnailServlet(BaseFederationServerServlet):
"""
Implementation of new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
a multipart/mixed response consisting of a JSON object and the requested media
item. This endpoint only returns local media.
"""

PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
RATELIMIT = True

def __init__(
self,
hs: "HomeServer",
ratelimiter: FederationRateLimiter,
authenticator: Authenticator,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.media_repo = self.hs.get_media_repository()
self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
self.thumbnail_provider = ThumbnailProvider(
hs, self.media_repo, self.media_repo.media_storage
)

async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
request: SynapseRequest,
media_id: str,
) -> None:

width = parse_integer(request, "width", required=True)
height = parse_integer(request, "height", required=True)
method = parse_string(request, "method", "scale")
# TODO Parse the Accept header to get a prioritised list of thumbnail types.
m_type = "image/png"
max_timeout_ms = parse_integer(
request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
)
max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)

if self.dynamic_thumbnails:
await self.thumbnail_provider.select_or_generate_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms, True
)
else:
await self.thumbnail_provider.respond_local_thumbnail(
request, media_id, width, height, method, m_type, max_timeout_ms, True
)
self.media_repo.mark_recently_accessed(None, media_id)


FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
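Both servlets answer with the MSC3916 multipart/mixed shape: a JSON metadata object followed by the raw media. A standalone approximation of splitting such a body using the stdlib (Synapse's own client uses its own streaming parser; this sketch assumes a small, fully buffered response):

import json
from email.parser import BytesParser
from email.policy import default

def split_multipart_mixed(content_type: str, body: bytes):
    # Re-attach the Content-Type header so the stdlib parser can find the boundary.
    blob = b"Content-Type: " + content_type.encode() + b"\r\n\r\n" + body
    msg = BytesParser(policy=default).parsebytes(blob)
    json_part, media_part = list(msg.iter_parts())[:2]
    metadata = json.loads(json_part.get_content())  # first part: JSON object
    return metadata, media_part.get_content()       # second part: media bytes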
@@ -42,7 +42,6 @@ class AdminHandler:
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled

async def get_whois(self, user: UserID) -> JsonMapping:
@@ -126,13 +125,7 @@ class AdminHandler:
# Get all rooms the user is in or has been in
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id,
membership_list=(
Membership.JOIN,
Membership.LEAVE,
Membership.BAN,
Membership.INVITE,
Membership.KNOCK,
),
membership_list=Membership.LIST,
)

# We only try and fetch events for rooms the user has been in. If
@@ -179,7 +172,7 @@ class AdminHandler:
if room.membership == Membership.JOIN:
stream_ordering = self._store.get_room_max_stream_ordering()
else:
stream_ordering = room.stream_ordering
stream_ordering = room.event_pos.stream

from_key = RoomStreamToken(topological=0, stream=0)
to_key = RoomStreamToken(stream=stream_ordering)
@@ -204,8 +197,14 @@ class AdminHandler:
# events that we have and then filtering, this isn't the most
# efficient method perhaps but it does guarantee we get everything.
while True:
events, _ = await self._store.paginate_room_events(
room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS
events, _ = (
await self._store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_key,
to_key=to_key,
limit=100,
direction=Direction.FORWARDS,
)
)
if not events:
break
@@ -221,7 +220,6 @@ class AdminHandler:
self._storage_controllers,
user_id,
events,
msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
)

writer.write_events(room_id, events)
@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
ratelimit=False,
require_consent=False,
)

# Mark the room forgotten too, because they won't be able to do this
# for us. This may lead to the room being purged eventually.
await self._room_member_handler.forget(user, room_id)
except Exception:
logger.exception(
"Failed to part user %r from room %r: ignoring and continuing",
@@ -20,10 +20,20 @@
#
#
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple
from typing import (
TYPE_CHECKING,
AbstractSet,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
)

from synapse.api import errors
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import (
Codes,
FederationDeniedError,
@@ -38,7 +48,9 @@ from synapse.metrics.background_process_metrics import (
wrap_as_background_process,
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import (
DeviceListUpdates,
JsonDict,
JsonMapping,
ScheduledTask,
@@ -214,138 +226,210 @@ class DeviceWorkerHandler:
@cancellable
async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken
) -> JsonDict:
) -> DeviceListUpdates:
"""Get list of users that have had the devices updated, or have newly
joined a room, that `user_id` may be interested in.
"""

set_tag("user_id", user_id)
set_tag("from_token", str(from_token))
now_room_key = self.store.get_room_max_token()

room_ids = await self.store.get_rooms_for_user(user_id)
now_token = self._event_sources.get_current_token()

changed = await self.get_device_changes_in_shared_rooms(
user_id, room_ids, from_token
# We need to work out all the different membership changes for the user
# and user they share a room with, to pass to
# `generate_sync_entry_for_device_list`. See its docstring for details
# on the data required.

joined_room_ids = await self.store.get_rooms_for_user(user_id)

# Get the set of rooms that the user has joined/left
membership_changes = (
await self.store.get_current_state_delta_membership_changes_for_user(
user_id, from_key=from_token.room_key, to_key=now_token.room_key
)
)

# Then work out if any users have since joined
rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
# Check for newly joined or left rooms. We need to make sure that we add
# to newly joined in the case membership goes from join -> leave -> join
# again.
newly_joined_rooms: Set[str] = set()
newly_left_rooms: Set[str] = set()
for change in membership_changes:
# We check for changes in "joinedness", i.e. if the membership has
# changed to or from JOIN.
if change.membership == Membership.JOIN:
if change.prev_membership != Membership.JOIN:
newly_joined_rooms.add(change.room_id)
newly_left_rooms.discard(change.room_id)
elif change.prev_membership == Membership.JOIN:
newly_joined_rooms.discard(change.room_id)
newly_left_rooms.add(change.room_id)

member_events = await self.store.get_membership_changes_for_user(
user_id, from_token.room_key, now_room_key
# We now work out if any other users have since joined or left the rooms
# the user is currently in.

# List of membership changes per room
room_to_deltas: Dict[str, List[StateDelta]] = {}
# The set of event IDs of membership events (so we can fetch their
# associated membership).
memberships_to_fetch: Set[str] = set()

# TODO: Only pull out membership events?
state_changes = await self.store.get_current_state_deltas_for_rooms(
joined_room_ids, from_token=from_token.room_key, to_token=now_token.room_key
)
rooms_changed.update(event.room_id for event in member_events)

stream_ordering = from_token.room_key.stream

possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
# Check if the forward extremities have changed. If not then we know
# the current state won't have changed, and so we can skip this room.
try:
if not await self.store.have_room_forward_extremities_changed_since(
room_id, stream_ordering
):
continue
except errors.StoreError:
pass

current_state_ids = await self._state_storage.get_current_state_ids(
room_id, await_full_state=False
)

# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
if room_id not in room_ids:
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_left.add(state_key)
for delta in state_changes:
if delta.event_type != EventTypes.Member:
continue

# Fetch the current state at the time.
try:
event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(
room_id, stream_ordering=stream_ordering
)
except errors.StoreError:
# we have purged the stream_ordering index since the stream
# ordering: treat it the same as a new room
event_ids = []
room_to_deltas.setdefault(delta.room_id, []).append(delta)
if delta.event_id:
memberships_to_fetch.add(delta.event_id)
if delta.prev_event_id:
memberships_to_fetch.add(delta.prev_event_id)

# Fetch all the memberships for the membership events
event_id_to_memberships = await self.store.get_membership_from_event_ids(
memberships_to_fetch
)

joined_invited_knocked = (
Membership.JOIN,
Membership.INVITE,
Membership.KNOCK,
)

# We now want to find any user that have newly joined/invited/knocked,
# or newly left, similarly to above.
newly_joined_or_invited_or_knocked_users: Set[str] = set()
newly_left_users: Set[str] = set()
for _, deltas in room_to_deltas.items():
for delta in deltas:
# Get the prev/new memberships for the delta
new_membership = None
prev_membership = None
if delta.event_id:
m = event_id_to_memberships.get(delta.event_id)
if m is not None:
new_membership = m.membership
if delta.prev_event_id:
m = event_id_to_memberships.get(delta.prev_event_id)
if m is not None:
prev_membership = m.membership

# Check if a user has newly joined/invited/knocked, or left.
if new_membership in joined_invited_knocked:
if prev_membership not in joined_invited_knocked:
newly_joined_or_invited_or_knocked_users.add(delta.state_key)
newly_left_users.discard(delta.state_key)
elif prev_membership in joined_invited_knocked:
newly_joined_or_invited_or_knocked_users.discard(delta.state_key)
newly_left_users.add(delta.state_key)

# Now we actually calculate the device list entry with the information
# calculated above.
device_list_updates = await self.generate_sync_entry_for_device_list(
user_id=user_id,
since_token=from_token,
now_token=now_token,
joined_room_ids=joined_room_ids,
newly_joined_rooms=newly_joined_rooms,
newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
newly_left_rooms=newly_left_rooms,
newly_left_users=newly_left_users,
)

# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
log_kv(
{"event": "encountered empty previous state", "room_id": room_id}
)
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_changed.add(state_key)
continue

current_member_id = current_state_ids.get((EventTypes.Member, user_id))
if not current_member_id:
continue

# mapping from event_id -> state_dict
prev_state_ids = await self._state_storage.get_state_ids_for_events(
event_ids,
await_full_state=False,
{
"changed": device_list_updates.changed,
"left": device_list_updates.left,
}
)

# Check if we've joined the room? If so we just blindly add all the users to
# the "possibly changed" users.
for state_dict in prev_state_ids.values():
member_event = state_dict.get((EventTypes.Member, user_id), None)
if not member_event or member_event != current_member_id:
for etype, state_key in current_state_ids.keys():
if etype != EventTypes.Member:
continue
possibly_changed.add(state_key)
break
return device_list_updates

# If there has been any change in membership, include them in the
# possibly changed list. We'll check if they are joined below,
# and we're not toooo worried about spuriously adding users.
for key, event_id in current_state_ids.items():
etype, state_key = key
if etype != EventTypes.Member:
continue
@measure_func("_generate_sync_entry_for_device_list")
async def generate_sync_entry_for_device_list(
self,
user_id: str,
since_token: StreamToken,
now_token: StreamToken,
joined_room_ids: AbstractSet[str],
newly_joined_rooms: AbstractSet[str],
newly_joined_or_invited_or_knocked_users: AbstractSet[str],
newly_left_rooms: AbstractSet[str],
newly_left_users: AbstractSet[str],
) -> DeviceListUpdates:
"""Generate the DeviceListUpdates section of sync

# check if this member has changed since any of the extremities
# at the stream_ordering, and add them to the list if so.
for state_dict in prev_state_ids.values():
prev_event_id = state_dict.get(key, None)
if not prev_event_id or prev_event_id != event_id:
if state_key != user_id:
possibly_changed.add(state_key)
break
Args:
sync_result_builder
newly_joined_rooms: Set of rooms user has joined since previous sync
newly_joined_or_invited_or_knocked_users: Set of users that have joined,
been invited to a room or are knocking on a room since
previous sync.
newly_left_rooms: Set of rooms user has left since previous sync
newly_left_users: Set of users that have left a room we're in since
previous sync
"""
# Take a copy since these fields will be mutated later.
newly_joined_or_invited_or_knocked_users = set(
newly_joined_or_invited_or_knocked_users
)
newly_left_users = set(newly_left_users)

if possibly_changed or possibly_left:
possibly_joined = possibly_changed
possibly_left = possibly_changed | possibly_left
# We want to figure out what user IDs the client should refetch
# device keys for, and which users we aren't going to track changes
# for anymore.
#
# For the first step we check:
# a. if any users we share a room with have updated their devices,
# and
# b. we also check if we've joined any new rooms, or if a user has
# joined a room we're in.
#
# For the second step we just find any users we no longer share a
# room with by looking at all users that have left a room plus users
# that were in a room we've left.

# Double check if we still share rooms with the given user.
users_rooms = await self.store.get_rooms_for_users(possibly_left)
for changed_user_id, entries in users_rooms.items():
if any(rid in room_ids for rid in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
users_that_have_changed = set()

else:
possibly_joined = set()
possibly_left = set()
# Step 1a, check for changes in devices of users we share a room
# with
users_that_have_changed = await self.get_device_changes_in_shared_rooms(
user_id,
joined_room_ids,
from_token=since_token,
now_token=now_token,
)

result = {"changed": list(possibly_joined), "left": list(possibly_left)}
# Step 1b, check for newly joined rooms
for room_id in newly_joined_rooms:
joined_users = await self.store.get_users_in_room(room_id)
newly_joined_or_invited_or_knocked_users.update(joined_users)

log_kv(result)
# TODO: Check that these users are actually new, i.e. either they
# weren't in the previous sync *or* they left and rejoined.
users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)

return result
user_signatures_changed = await self.store.get_users_whose_signatures_changed(
user_id, since_token.device_list_key
)
users_that_have_changed.update(user_signatures_changed)

# Now find users that we no longer track
for room_id in newly_left_rooms:
left_users = await self.store.get_users_in_room(room_id)
newly_left_users.update(left_users)

# Remove any users that we still share a room with.
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
if any(rid in joined_room_ids for rid in entries):
newly_left_users.discard(user_id)

return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users)

async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
if not self.hs.is_mine(UserID.from_string(user_id)):
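The "joinedness" tracking above only keeps a room in `newly_joined_rooms` if its final transition landed in JOIN, which handles the join -> leave -> join case the comment calls out. A standalone replay of that logic with hypothetical deltas (not Synapse code):

deltas = [
    ("!r1:x", None, "join"),
    ("!r1:x", "join", "leave"),
    ("!r1:x", "leave", "join"),
]
newly_joined, newly_left = set(), set()
for room_id, prev, new in deltas:
    if new == "join":
        if prev != "join":
            newly_joined.add(room_id)
            newly_left.discard(room_id)
    elif prev == "join":
        newly_joined.discard(room_id)
        newly_left.add(room_id)
# The room re-entered JOIN last, so it counts as newly joined, not left.
assert newly_joined == {"!r1:x"} and newly_left == set()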
@@ -236,6 +236,13 @@ class DeviceMessageHandler:
local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items():
if not UserID.is_valid(user_id):
logger.warning(
"Ignoring attempt to send device message to invalid user: %r",
user_id,
)
continue

# add an opentracing log entry for each message
for device_id, message_content in by_device.items():
log_kv(
@@ -35,6 +35,7 @@ from synapse.api.errors import CodeMessageException, Codes, NotFoundError, Synap
|
||||
from synapse.handlers.device import DeviceHandler
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
|
||||
from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
|
||||
from synapse.types import (
|
||||
JsonDict,
|
||||
JsonMapping,
|
||||
@@ -45,7 +46,10 @@ from synapse.types import (
|
||||
from synapse.util import json_decoder
|
||||
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
from synapse.util.retryutils import (
|
||||
NotRetryingDestination,
|
||||
filter_destinations_by_retry_limiter,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -53,6 +57,9 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"
|
||||
|
||||
|
||||
class E2eKeysHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.config = hs.config
|
||||
@@ -62,6 +69,7 @@ class E2eKeysHandler:
|
||||
self._appservice_handler = hs.get_application_service_handler()
|
||||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
@@ -82,6 +90,12 @@ class E2eKeysHandler:
|
||||
edu_updater.incoming_signing_key_update,
|
||||
)
|
||||
|
||||
self.device_key_uploader = self.upload_device_keys_for_user
|
||||
else:
|
||||
self.device_key_uploader = (
|
||||
ReplicationUploadKeysForUserRestServlet.make_client(hs)
|
||||
)
|
||||
|
||||
# doesn't really work as part of the generic query API, because the
|
||||
# query request requires an object POST, but we abuse the
|
||||
# "query handler" interface.
|
||||
@@ -145,6 +159,11 @@ class E2eKeysHandler:
|
||||
remote_queries = {}
|
||||
|
||||
for user_id, device_ids in device_keys_query.items():
|
||||
if not UserID.is_valid(user_id):
|
||||
# Ignore invalid user IDs, which is the same behaviour as if
|
||||
# the user existed but had no keys.
|
||||
continue
|
||||
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
local_query[user_id] = device_ids
|
||||
@@ -259,10 +278,8 @@ class E2eKeysHandler:
|
||||
"%d destinations to query devices for", len(remote_queries_not_in_cache)
|
||||
)
|
||||
|
||||
async def _query(
|
||||
destination_queries: Tuple[str, Dict[str, Iterable[str]]]
|
||||
) -> None:
|
||||
destination, queries = destination_queries
|
||||
async def _query(destination: str) -> None:
|
||||
queries = remote_queries_not_in_cache[destination]
|
||||
return await self._query_devices_for_destination(
|
||||
results,
|
||||
cross_signing_keys,
|
||||
@@ -272,9 +289,27 @@ class E2eKeysHandler:
|
||||
timeout,
|
||||
)
|
||||
|
||||
# Only try and fetch keys for destinations that are not marked as
|
||||
# down.
|
||||
unfiltered_destinations = remote_queries_not_in_cache.keys()
|
||||
filtered_destinations = set(
|
||||
await filter_destinations_by_retry_limiter(
|
||||
unfiltered_destinations,
|
||||
self.clock,
|
||||
self.store,
|
||||
# Let's give an arbitrary grace period for those hosts that are
|
||||
# only recently down
|
||||
retry_due_within_ms=60 * 1000,
|
||||
)
|
||||
)
|
||||
failures.update(
|
||||
(dest, _NOT_READY_FOR_RETRY_FAILURE)
|
||||
for dest in (unfiltered_destinations - filtered_destinations)
|
||||
)
|
||||
|
||||
await concurrently_execute(
|
||||
_query,
|
||||
remote_queries_not_in_cache.items(),
|
||||
filtered_destinations,
|
||||
10,
|
||||
delay_cancellation=True,
|
||||
)
|
||||
@@ -775,36 +810,17 @@ class E2eKeysHandler:
|
||||
"one_time_keys": A mapping from algorithm to number of keys for that
|
||||
algorithm, including those previously persisted.
|
||||
"""
|
||||
# This can only be called from the main process.
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
# TODO: Validate the JSON to make sure it has the right keys.
|
||||
device_keys = keys.get("device_keys", None)
|
||||
if device_keys:
|
||||
logger.info(
|
||||
"Updating device_keys for device %r for user %s at %d",
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
await self.device_key_uploader(
|
||||
user_id=user_id,
|
||||
device_id=device_id,
|
||||
keys={"device_keys": device_keys},
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"message": "Updating device_keys for user.",
|
||||
"user_id": user_id,
|
||||
"device_id": device_id,
|
||||
}
|
||||
)
|
||||
# TODO: Sign the JSON with the server key
|
||||
changed = await self.store.set_e2e_device_keys(
|
||||
user_id, device_id, time_now, device_keys
|
||||
)
|
||||
if changed:
|
||||
# Only notify about device updates *if* the keys actually changed
|
||||
await self.device_handler.notify_device_update(user_id, [device_id])
|
||||
else:
|
||||
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
|
||||
|
||||
one_time_keys = keys.get("one_time_keys", None)
|
||||
if one_time_keys:
|
||||
log_kv(
|
||||
@@ -840,6 +856,49 @@ class E2eKeysHandler:
|
||||
{"message": "Did not update fallback_keys", "reason": "no keys given"}
|
||||
)
|
||||
|
||||
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
|
||||
set_tag("one_time_key_counts", str(result))
|
||||
return {"one_time_key_counts": result}
|
||||
|
||||
@tag_args
|
||||
async def upload_device_keys_for_user(
|
||||
self, user_id: str, device_id: str, keys: JsonDict
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
user_id: user whose keys are being uploaded.
|
||||
device_id: device whose keys are being uploaded.
|
||||
device_keys: the `device_keys` of an /keys/upload request.
|
||||
|
||||
"""
|
||||
# This can only be called from the main process.
|
||||
assert isinstance(self.device_handler, DeviceHandler)
|
||||
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
device_keys = keys["device_keys"]
|
||||
logger.info(
|
||||
"Updating device_keys for device %r for user %s at %d",
|
||||
device_id,
|
||||
user_id,
|
||||
time_now,
|
||||
)
|
||||
log_kv(
|
||||
{
|
||||
"message": "Updating device_keys for user.",
|
||||
"user_id": user_id,
|
||||
"device_id": device_id,
|
||||
}
|
||||
)
|
||||
# TODO: Sign the JSON with the server key
|
||||
changed = await self.store.set_e2e_device_keys(
|
||||
user_id, device_id, time_now, device_keys
|
||||
)
|
||||
if changed:
|
||||
# Only notify about device updates *if* the keys actually changed
|
||||
await self.device_handler.notify_device_update(user_id, [device_id])
|
||||
|
||||
# the device should have been registered already, but it may have been
|
||||
# deleted due to a race with a DELETE request. Or we may be using an
|
||||
# old access_token without an associated device_id. Either way, we
|
||||
@@ -847,14 +906,15 @@ class E2eKeysHandler:
|
||||
# keys without a corresponding device.
|
||||
await self.device_handler.check_device_registered(user_id, device_id)
|
||||
|
||||
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||
|
||||
set_tag("one_time_key_counts", str(result))
|
||||
return {"one_time_key_counts": result}
|
||||
|
||||
async def _upload_one_time_keys_for_user(
|
||||
self, user_id: str, device_id: str, time_now: int, one_time_keys: JsonDict
|
||||
) -> None:
|
||||
# We take out a lock so that we don't have to worry about a client
|
||||
# sending duplicate requests.
|
||||
lock_key = f"{user_id}_{device_id}"
|
||||
async with self._worker_lock_handler.acquire_lock(
|
||||
ONE_TIME_KEY_UPLOAD, lock_key
|
||||
):
|
||||
logger.info(
|
||||
"Adding one_time_keys %r for device %r for user %r at %d",
|
||||
one_time_keys.keys(),
|
||||
@@ -893,7 +953,9 @@ class E2eKeysHandler:
|
||||
)
|
||||
|
||||
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
|
||||
await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
|
||||
await self.store.add_e2e_one_time_keys(
|
||||
user_id, device_id, time_now, new_keys
|
||||
)
|
||||
|
||||
async def upload_signing_keys_for_user(
|
||||
self, user_id: str, keys: JsonDict
|
||||
@@ -1586,6 +1648,9 @@ def _check_device_signature(
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)


_NOT_READY_FOR_RETRY_FAILURE = {"status": 503, "message": "Not ready for retry"}


def _exception_to_failure(e: Exception) -> JsonDict:
if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)}
@@ -1594,7 +1659,7 @@ def _exception_to_failure(e: Exception) -> JsonDict:
return {"status": e.code, "message": str(e)}

if isinstance(e, NotRetryingDestination):
-return {"status": 503, "message": "Not ready for retry"}
+return _NOT_READY_FOR_RETRY_FAILURE

# include ConnectionRefused and other errors
#
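The change above hoists the "not ready for retry" failure into a module-level constant so every such failure shares one dict instead of allocating a new one per call; since the dict is shared, callers must treat it as read-only. A minimal standalone sketch of the same shape (stand-in exception class, not Synapse's actual module):

from typing import Any, Dict

JsonDict = Dict[str, Any]

_NOT_READY_FOR_RETRY_FAILURE: JsonDict = {"status": 503, "message": "Not ready for retry"}

class NotRetryingDestination(Exception):
    """Stand-in for synapse.util.retryutils.NotRetryingDestination."""

def exception_to_failure(e: Exception) -> JsonDict:
    if isinstance(e, NotRetryingDestination):
        return _NOT_READY_FOR_RETRY_FAILURE
    # Anything else (ConnectionRefused etc.) becomes a generic 503.
    return {"status": 503, "message": str(e)}

print(exception_to_failure(NotRetryingDestination()))  # {'status': 503, ...}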
@@ -34,7 +34,7 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import ReadWriteLock

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -58,7 +58,7 @@ class E2eRoomKeysHandler:
# clients belonging to a user will receive and try to upload a new session at
# roughly the same time. Also used to lock out uploads when the key is being
# changed.
-self._upload_linearizer = Linearizer("upload_room_keys_lock")
+self._upload_lock = ReadWriteLock()

@trace
async def get_room_keys(
@@ -89,7 +89,7 @@ class E2eRoomKeysHandler:

# we deliberately take the lock to get keys so that changing the version
# works atomically
-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.read(user_id):
# make sure the backup version exists
try:
await self.store.get_e2e_room_keys_version_info(user_id, version)
@@ -132,7 +132,7 @@ class E2eRoomKeysHandler:
"""

# lock for consistency with uploading
-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.write(user_id):
# make sure the backup version exists
try:
version_info = await self.store.get_e2e_room_keys_version_info(
@@ -193,7 +193,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.

# XXX: perhaps we should use a finer grained lock here?
-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.write(user_id):
# Check that the version we're trying to upload is the current version
try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id)
@@ -247,6 +247,12 @@ class E2eRoomKeysHandler:
if current_room_key:
if self._should_replace_room_key(current_room_key, room_key):
log_kv({"message": "Replacing room key."})
logger.debug(
"Replacing room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
# updates are done one at a time in the DB, so send
# updates right away rather than batching them up,
# like we do with the inserts
@@ -256,6 +262,12 @@ class E2eRoomKeysHandler:
changed = True
else:
log_kv({"message": "Not replacing room_key."})
logger.debug(
"Not replacing room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
else:
log_kv(
{
@@ -265,6 +277,12 @@ class E2eRoomKeysHandler:
}
)
log_kv({"message": "Replacing room key."})
logger.debug(
"Inserting new room key. room=%s session=%s user=%s",
room_id,
session_id,
user_id,
)
to_insert.append((room_id, session_id, room_key))
changed = True

@@ -337,7 +355,7 @@ class E2eRoomKeysHandler:
# TODO: Validate the JSON to make sure it has the right keys.

# lock everyone out until we've switched version
-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.write(user_id):
new_version = await self.store.create_e2e_room_keys_version(
user_id, version_info
)
@@ -364,7 +382,7 @@ class E2eRoomKeysHandler:
}
"""

-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.read(user_id):
try:
res = await self.store.get_e2e_room_keys_version_info(user_id, version)
except StoreError as e:
@@ -389,7 +407,7 @@ class E2eRoomKeysHandler:
NotFoundError: if this backup version doesn't exist
"""

-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.write(user_id):
try:
await self.store.delete_e2e_room_keys_version(user_id, version)
except StoreError as e:
@@ -419,7 +437,7 @@ class E2eRoomKeysHandler:
raise SynapseError(
400, "Version in body does not match", Codes.INVALID_PARAM
)
-async with self._upload_linearizer.queue(user_id):
+async with self._upload_lock.write(user_id):
try:
old_info = await self.store.get_e2e_room_keys_version_info(
user_id, version

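The hunks above replace the per-user Linearizer with a read/write lock: key reads may run concurrently, while version changes take the write side and lock everyone out until the switch is done. A minimal sketch of such a lock using asyncio primitives (not Synapse's synapse.util.async_helpers.ReadWriteLock, which is additionally keyed, here by user_id):

import asyncio
from contextlib import asynccontextmanager

class SimpleReadWriteLock:
    """Many concurrent readers, or one writer; writers wait for readers to drain."""

    def __init__(self) -> None:
        self._readers = 0
        self._no_readers = asyncio.Event()
        self._no_readers.set()
        self._write_lock = asyncio.Lock()

    @asynccontextmanager
    async def read(self):
        async with self._write_lock:  # wait out any active writer
            self._readers += 1
            self._no_readers.clear()
        try:
            yield
        finally:
            self._readers -= 1
            if self._readers == 0:
                self._no_readers.set()

    @asynccontextmanager
    async def write(self):
        async with self._write_lock:       # block new readers and writers
            await self._no_readers.wait()  # drain readers already in flight
            yield

async def demo() -> None:
    lock = SimpleReadWriteLock()
    async with lock.read():
        pass  # concurrent gets of room keys would sit here
    async with lock.write():
        pass  # version creation/deletion/updates lock everyone out here

asyncio.run(demo())

In the diff, get_room_keys and get_version_info take the read side; uploads, version creation, deletion and updates take the write side.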
@@ -148,7 +148,6 @@ class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._config = hs.config

async def get_event(
self,
@@ -194,7 +193,6 @@ class EventHandler:
user.to_string(),
[event],
is_peeking=is_peeking,
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

if not filtered:

@@ -199,7 +199,7 @@ class InitialSyncHandler:
)
elif event.membership == Membership.LEAVE:
room_end_token = RoomStreamToken(
-stream=event.stream_ordering,
+stream=event.event_pos.stream,
)
deferred_room_state = run_in_background(
self._state_storage_controller.get_state_for_events,
@@ -224,7 +224,6 @@ class InitialSyncHandler:
self._storage_controllers,
user_id,
messages,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@@ -383,7 +382,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@@ -498,7 +496,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)

@@ -201,7 +201,7 @@ class MessageHandler:

if at_token:
last_event_id = (
-await self.store.get_last_event_in_room_before_stream_ordering(
+await self.store.get_last_event_id_in_room_before_stream_ordering(
room_id,
end_token=at_token.room_key,
)
@@ -496,13 +496,6 @@ class EventCreationHandler:

self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state

self.membership_types_to_include_profile_data_in = {
Membership.JOIN,
Membership.KNOCK,
}
if self.hs.config.server.include_profile_data_on_invite:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)

self.send_event = ReplicationSendEventRestServlet.make_client(hs)
self.send_events = ReplicationSendEventsRestServlet.make_client(hs)

@@ -594,8 +587,6 @@ class EventCreationHandler:
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.

Adds display names to Join membership events.

Args:
requester
event_dict: An entire event
@@ -651,6 +642,17 @@ class EventCreationHandler:
"""
await self.auth_blocking.check_auth_blocking(requester=requester)

if event_dict["type"] == EventTypes.Message:
requester_suspended = await self.store.get_user_suspended_status(
requester.user.to_string()
)
if requester_suspended:
raise SynapseError(
403,
"Sending messages while account is suspended is not allowed.",
Codes.USER_ACCOUNT_SUSPENDED,
)

if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"]
maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
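The new guard above rejects message sends from suspended accounts before event creation proceeds. A minimal standalone sketch of the same gate (stand-in error class; the errcode string here is an assumption, not quoted from the source):

class SuspendedError(Exception):  # stand-in for synapse.api.errors.SynapseError
    def __init__(self, code: int, msg: str, errcode: str) -> None:
        super().__init__(msg)
        self.code, self.errcode = code, errcode

def check_can_send(event_type: str, suspended: bool) -> None:
    # Only ordinary messages are blocked; other event types still go through.
    if event_type == "m.room.message" and suspended:
        raise SuspendedError(
            403,
            "Sending messages while account is suspended is not allowed.",
            "USER_ACCOUNT_SUSPENDED",  # assumed spelling of the errcode
        )

check_can_send("m.room.topic", suspended=True)     # allowed
check_can_send("m.room.message", suspended=False)  # allowed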
@@ -672,29 +674,6 @@ class EventCreationHandler:

self.validator.validate_builder(builder)

if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)

if membership in self.membership_types_to_include_profile_data_in:
# If event doesn't include a display name, add one.
profile = self.profile_handler
content = builder.content

try:
if "displayname" not in content:
displayname = await profile.get_displayname(target)
if displayname is not None:
content["displayname"] = displayname
if "avatar_url" not in content:
avatar_url = await profile.get_avatar_url(target)
if avatar_url is not None:
content["avatar_url"] = avatar_url
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s", target, e
)

is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
if require_consent and not is_exempt:
await self.assert_accepted_privacy_policy(requester)
@@ -1583,6 +1562,7 @@ class EventCreationHandler:
# stream_ordering entry manually (as it was persisted on
# another worker).
event.internal_metadata.stream_ordering = stream_id
event.internal_metadata.instance_name = writer_instance

return event

@@ -27,7 +27,6 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -41,6 +40,7 @@ from synapse.types import (
StreamKeyType,
TaskStatus,
)
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.visibility import filter_events_for_client
@@ -507,7 +507,8 @@ class PaginationHandler:

# Initially fetch the events from the database. With any luck, we can return
# these without blocking on backfill (handled below).
-events, next_key = await self.store.paginate_room_events(
+events, next_key = (
+await self.store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
@@ -515,6 +516,7 @@ class PaginationHandler:
limit=pagin_config.limit,
event_filter=event_filter,
)
+)

if pagin_config.direction == Direction.BACKWARDS:
# We use a `Set` because there can be multiple events at a given depth
@@ -582,7 +584,8 @@ class PaginationHandler:
# If we did backfill something, refetch the events from the database to
# catch anything new that might have been added since we last fetched.
if did_backfill:
-events, next_key = await self.store.paginate_room_events(
+events, next_key = (
+await self.store.paginate_room_events_by_topological_ordering(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
@@ -590,6 +593,7 @@ class PaginationHandler:
limit=pagin_config.limit,
event_filter=event_filter,
)
+)
else:
# Otherwise, we can backfill in the background for eventual
# consistency's sake but we don't need to block the client waiting
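The two fetches above implement a fetch, backfill, refetch flow: serve from the database when possible, and only re-read after a blocking backfill actually added something. A standalone sketch of the control flow (hypothetical fetch/backfill callables):

from typing import Callable, List

def paginate(fetch: Callable[[], List[str]], backfill: Callable[[], bool], limit: int) -> List[str]:
    events = fetch()  # initial read: with any luck this already satisfies the request
    if len(events) < limit:
        did_backfill = backfill()  # block only when we actually need more history
        if did_backfill:
            events = fetch()  # refetch to catch anything backfill added
    return events

print(paginate(lambda: ["a", "b"], lambda: True, limit=2))  # ['a', 'b']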
@@ -623,7 +627,6 @@ class PaginationHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

# if, after the filter is applied, there are no more events

@@ -74,6 +74,17 @@ class ProfileHandler:
self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules

async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
"""
Get a user's profile as a JSON dictionary.

Args:
user_id: The user to fetch the profile of.
ignore_backoff: True to ignore backoff when fetching over federation.

Returns:
A JSON dictionary. For local queries this will include the displayname and avatar_url
fields. For remote queries it may contain arbitrary information.
"""
target_user = UserID.from_string(user_id)

if self.hs.is_mine(target_user):
@@ -107,6 +118,15 @@ class ProfileHandler:
raise e.to_synapse_error()

async def get_displayname(self, target_user: UserID) -> Optional[str]:
"""
Fetch a user's display name from their profile.

Args:
target_user: The user to fetch the display name of.

Returns:
The user's display name or None if unset.
"""
if self.hs.is_mine(target_user):
try:
displayname = await self.store.get_profile_displayname(target_user)
@@ -203,6 +223,15 @@ class ProfileHandler:
await self._update_join_states(requester, target_user)

async def get_avatar_url(self, target_user: UserID) -> Optional[str]:
"""
Fetch a user's avatar URL from their profile.

Args:
target_user: The user to fetch the avatar URL of.

Returns:
The user's avatar URL or None if unset.
"""
if self.hs.is_mine(target_user):
try:
avatar_url = await self.store.get_profile_avatar_url(target_user)
@@ -403,6 +432,12 @@ class ProfileHandler:
async def _update_join_states(
self, requester: Requester, target_user: UserID
) -> None:
"""
Update the membership events of each room the user is joined to with the
new profile information.

Note that this stomps over any custom display name or avatar URL in member events.
"""
if not self.hs.is_mine(target_user):
return

@@ -286,7 +286,13 @@ class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
to_key: Optional[MultiWriterStreamToken] = None,
) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
"""
Find read receipts for given rooms (> `from_token` and <= `to_token`)
"""

if to_key is None:
to_key = self.get_current_key()

if from_key == to_key:

@@ -590,7 +590,7 @@ class RegistrationHandler:
# moving away from bare excepts is a good thing to do.
logger.error("Failed to join new user to %r: %r", r, e)
except Exception as e:
-logger.error("Failed to join new user to %r: %r", r, e)
+logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)

async def _auto_join_rooms(self, user_id: str) -> None:
"""Automatically joins users to auto join rooms - creating the room in the first place

@@ -95,7 +95,6 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler()
self._config = hs.config

async def get_relations(
self,
@@ -164,7 +163,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

# The relations returned for the requested event do include their
@@ -393,9 +391,9 @@ class RelationsHandler:

# Attempt to find another event to use as the latest event.
potential_events, _ = await self._main_store.get_relations_for_event(
+room_id,
event_id,
event,
-room_id,
RelationTypes.THREAD,
direction=Direction.FORWARDS,
)
@@ -610,7 +608,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)

aggregations = await self.get_bundled_aggregations(

@@ -40,7 +40,6 @@ from typing import (
)

import attr
from typing_extensions import TypedDict

import synapse.events.snapshot
from synapse.api.constants import (
@@ -88,6 +87,7 @@ from synapse.types import (
UserID,
create_requester,
)
from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse
from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache
@@ -1188,6 +1188,8 @@ class RoomCreationHandler:
)
events_to_send.append((power_event, power_context))
else:
# Please update the docs for `default_power_level_content_override` when
# updating the `events` dict below
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users_default": 0,
@@ -1476,7 +1478,6 @@ class RoomContextHandler:
user.to_string(),
events,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

event = await self.store.get_event(
@@ -1749,7 +1750,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
from_key=from_key,
to_key=to_key,
limit=limit or 10,
-order="ASC",
+direction=Direction.FORWARDS,
)

events = list(room_events)
@@ -1780,63 +1781,6 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
return self.store.get_current_room_stream_token_for_room_id(room_id)


class ShutdownRoomParams(TypedDict):
"""
Attributes:
requester_user_id:
User who requested the action. Will be recorded as putting the room on the
blocking list.
new_room_user_id:
If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be
moved into that room. If not set, no new room will be created
and the users will just be removed from the old room.
new_room_name:
A string representing the name of the room that new users will
be invited to. Defaults to `Content Violation Notification`
message:
A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly
convey why the original room was shut down.
Defaults to `Sharing illegal content on this server is not
permitted and rooms in violation will be blocked.`
block:
If set to `true`, this room will be added to a blocking list,
preventing future attempts to join the room. Defaults to `false`.
purge:
If set to `true`, purge the given room from the database.
force_purge:
If set to `true`, the room will be purged from the database
even if there are still users joined to the room.
"""

requester_user_id: Optional[str]
new_room_user_id: Optional[str]
new_room_name: Optional[str]
message: Optional[str]
block: bool
purge: bool
force_purge: bool


class ShutdownRoomResponse(TypedDict):
"""
Attributes:
kicked_users: An array of users (`user_id`) that were kicked.
failed_to_kick_users:
An array of users (`user_id`) that were not kicked.
local_aliases:
An array of strings representing the local aliases that were
migrated from the old room to the new.
new_room_id: A string representing the room ID of the new room.
"""

kicked_users: List[str]
failed_to_kick_users: List[str]
local_aliases: List[str]
new_room_id: Optional[str]


class RoomShutdownHandler:
DEFAULT_MESSAGE = (
"Sharing illegal content on this server is not permitted and rooms in"

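For illustration, a hypothetical parameter set matching the TypedDict above (all values invented, assuming ShutdownRoomParams and Optional are importable):

params: ShutdownRoomParams = {
    "requester_user_id": "@admin:example.com",
    "new_room_user_id": "@abuse:example.com",
    "new_room_name": "Content Violation Notification",
    "message": "This room was shut down for sharing illegal content.",
    "block": True,   # prevent future attempts to join
    "purge": True,   # remove the room from the database
    "force_purge": False,
}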
@@ -106,6 +106,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_auth_handler = hs.get_event_auth_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()

self._membership_types_to_include_profile_data_in = {
Membership.JOIN,
Membership.KNOCK,
}
if self.hs.config.server.include_profile_data_on_invite:
self._membership_types_to_include_profile_data_in.add(Membership.INVITE)

self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")

@@ -785,9 +792,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
-# Strip profile data, knowing that new profile data will be added to the
-# event's content in event_creation_handler.create_event() using the target's
-# global profile.
+# Strip profile data, knowing that new profile data will be added to
+# the event's content below using the target's global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)

@@ -823,6 +829,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if action in ["kick", "unban"]:
effective_membership_state = "leave"

if effective_membership_state not in Membership.LIST:
raise SynapseError(400, "Invalid membership key")

# Add profile data for joins etc, if no per-room profile.
if (
effective_membership_state
in self._membership_types_to_include_profile_data_in
):
# If event doesn't include a display name, add one.
profile = self.profile_handler

try:
if "displayname" not in content:
displayname = await profile.get_displayname(target)
if displayname is not None:
content["displayname"] = displayname
if "avatar_url" not in content:
avatar_url = await profile.get_avatar_url(target)
if avatar_url is not None:
content["avatar_url"] = avatar_url
except Exception as e:
logger.info("Failed to get profile information for %r: %s", target, e)

# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:

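The block above only fills in profile keys that are absent, so a per-room profile supplied by the client wins over the global one. A standalone sketch of that fill-in (toy stand-ins for the ProfileHandler getters):

import asyncio
from typing import Optional

async def get_displayname(user: str) -> Optional[str]:
    return "Alice"  # toy stand-in for ProfileHandler.get_displayname

async def get_avatar_url(user: str) -> Optional[str]:
    return "mxc://example.com/abc"  # toy stand-in for ProfileHandler.get_avatar_url

async def fill_profile(content: dict, target: str) -> dict:
    if "displayname" not in content:
        displayname = await get_displayname(target)
        if displayname is not None:
            content["displayname"] = displayname
    if "avatar_url" not in content:
        avatar_url = await get_avatar_url(target)
        if avatar_url is not None:
            content["avatar_url"] = avatar_url
    return content

# The caller-supplied displayname survives; the missing avatar is filled in.
print(asyncio.run(fill_profile({"displayname": "Ali"}, "@alice:example.com")))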
@@ -183,8 +183,29 @@ class RoomSummaryHandler:
) -> JsonDict:
"""See docstring for SpaceSummaryHandler.get_room_hierarchy."""

-# First of all, check that the room is accessible.
-if not await self._is_local_room_accessible(requested_room_id, requester):
+# If the room is available locally, quickly check that the user can access it.
+local_room = await self._store.is_host_joined(
+requested_room_id, self._server_name
+)
+if local_room and not await self._is_local_room_accessible(
+requested_room_id, requester
+):
raise UnstableSpecAuthError(
403,
"User %s not in room %s, and room previews are disabled"
+% (requester, requested_room_id),
+errcode=Codes.NOT_JOINED,
+)

+if not local_room:
+room_hierarchy = await self._summarize_remote_room_hierarchy(
+_RoomQueueEntry(requested_room_id, ()),
+False,
+)
+root_room_entry = room_hierarchy[0]
+if not root_room_entry or not await self._is_remote_room_accessible(
+requester, requested_room_id, root_room_entry.room
+):
+raise UnstableSpecAuthError(
+403,
+"User %s not in room %s, and room previews are disabled"
@@ -483,7 +483,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

events.sort(key=lambda e: -rank_map[e.event_id])
@@ -585,7 +584,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

room_events.extend(events)
@@ -673,14 +671,12 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
res.events_before,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

events_after = await filter_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_after,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)

context: JsonDict = {

synapse/handlers/sliding_sync/__init__.py (new file, 1079 lines): diff suppressed because it is too large.
synapse/handlers/sliding_sync/extensions.py (new file, 699 lines):
@@ -0,0 +1,699 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#

import itertools
import logging
from typing import TYPE_CHECKING, AbstractSet, Dict, Mapping, Optional, Sequence, Set

from typing_extensions import assert_never

from synapse.api.constants import AccountDataTypes, EduTypes
from synapse.handlers.receipts import ReceiptEventSource
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.receipts import ReceiptInRoom
from synapse.types import (
DeviceListUpdates,
JsonMapping,
MultiWriterStreamToken,
SlidingSyncStreamToken,
StrCollection,
StreamToken,
)
from synapse.types.handlers.sliding_sync import (
HaveSentRoomFlag,
MutablePerConnectionState,
OperationType,
PerConnectionState,
SlidingSyncConfig,
SlidingSyncResult,
)

if TYPE_CHECKING:
from synapse.server import HomeServer

logger = logging.getLogger(__name__)

class SlidingSyncExtensionHandler:
"""Handles the extensions to sliding sync."""

def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
self.device_handler = hs.get_device_handler()
self.push_rules_handler = hs.get_push_rules_handler()

@trace
async def get_extensions_response(
self,
sync_config: SlidingSyncConfig,
previous_connection_state: "PerConnectionState",
new_connection_state: "MutablePerConnectionState",
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> SlidingSyncResult.Extensions:
"""Handle extension requests.

Args:
sync_config: Sync configuration
previous_connection_state: Snapshot of the per-connection state from
before this request.
new_connection_state: A mutable copy of the per-connection
state, used to record updates to the state during this request.
actual_lists: Sliding window API. A map of list key to list results in the
Sliding Sync response.
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
Sliding Sync response.
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""

if sync_config.extensions is None:
return SlidingSyncResult.Extensions()

to_device_response = None
if sync_config.extensions.to_device is not None:
to_device_response = await self.get_to_device_extension_response(
sync_config=sync_config,
to_device_request=sync_config.extensions.to_device,
to_token=to_token,
)

e2ee_response = None
if sync_config.extensions.e2ee is not None:
e2ee_response = await self.get_e2ee_extension_response(
sync_config=sync_config,
e2ee_request=sync_config.extensions.e2ee,
to_token=to_token,
from_token=from_token,
)

account_data_response = None
if sync_config.extensions.account_data is not None:
account_data_response = await self.get_account_data_extension_response(
sync_config=sync_config,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
account_data_request=sync_config.extensions.account_data,
to_token=to_token,
from_token=from_token,
)

receipts_response = None
if sync_config.extensions.receipts is not None:
receipts_response = await self.get_receipts_extension_response(
sync_config=sync_config,
previous_connection_state=previous_connection_state,
new_connection_state=new_connection_state,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
actual_room_response_map=actual_room_response_map,
receipts_request=sync_config.extensions.receipts,
to_token=to_token,
from_token=from_token,
)

typing_response = None
if sync_config.extensions.typing is not None:
typing_response = await self.get_typing_extension_response(
sync_config=sync_config,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
actual_room_response_map=actual_room_response_map,
typing_request=sync_config.extensions.typing,
to_token=to_token,
from_token=from_token,
)

return SlidingSyncResult.Extensions(
to_device=to_device_response,
e2ee=e2ee_response,
account_data=account_data_response,
receipts=receipts_response,
typing=typing_response,
)

def find_relevant_room_ids_for_extension(
self,
requested_lists: Optional[StrCollection],
requested_room_ids: Optional[StrCollection],
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: AbstractSet[str],
) -> Set[str]:
"""
Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only
return results for rooms in the Sliding Sync response. This matches up the
requested rooms/lists with the actual lists/rooms in the Sliding Sync response.

{"lists": []} // Do not process any lists.
{"lists": ["rooms", "dms"]} // Process only a subset of lists.
{"lists": ["*"]} // Process all lists defined in the Sliding Window API. (This is the default.)

{"rooms": []} // Do not process any specific rooms.
{"rooms": ["!a:b", "!c:d"]} // Process only a subset of room subscriptions.
{"rooms": ["*"]} // Process all room subscriptions defined in the Room Subscription API. (This is the default.)

Args:
requested_lists: The `lists` from the extension request.
requested_room_ids: The `rooms` from the extension request.
actual_lists: The actual lists from the Sliding Sync response.
actual_room_ids: The actual room subscriptions from the Sliding Sync request.
"""

# We only want to include account data for rooms that are already in the sliding
# sync response AND that were requested in the account data request.
relevant_room_ids: Set[str] = set()

# See what rooms from the room subscriptions we should get account data for
if requested_room_ids is not None:
for room_id in requested_room_ids:
# A wildcard means we process all rooms from the room subscriptions
if room_id == "*":
relevant_room_ids.update(actual_room_ids)
break

if room_id in actual_room_ids:
relevant_room_ids.add(room_id)

# See what rooms from the sliding window lists we should get account data for
if requested_lists is not None:
for list_key in requested_lists:
# Just some typing because we share the variable name in multiple places
actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None

# A wildcard means we process rooms from all lists
if list_key == "*":
for actual_list in actual_lists.values():
# We only expect a single SYNC operation for any list
assert len(actual_list.ops) == 1
sync_op = actual_list.ops[0]
assert sync_op.op == OperationType.SYNC

relevant_room_ids.update(sync_op.room_ids)

break

actual_list = actual_lists.get(list_key)
if actual_list is not None:
# We only expect a single SYNC operation for any list
assert len(actual_list.ops) == 1
sync_op = actual_list.ops[0]
assert sync_op.op == OperationType.SYNC

relevant_room_ids.update(sync_op.room_ids)

return relevant_room_ids

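A standalone sketch (plain Python, no Synapse imports) of the wildcard handling documented above, for the `rooms` leg: "*" selects every room in the response, otherwise only the intersection of the requested and actual rooms survives.

from typing import AbstractSet, Optional, Set

def relevant_rooms(requested: Optional[AbstractSet[str]], actual: AbstractSet[str]) -> Set[str]:
    if requested is None:
        return set()  # key omitted entirely: this leg contributes nothing
    if "*" in requested:
        return set(actual)  # wildcard: everything in the Sliding Sync response
    return set(requested) & set(actual)  # only rooms present in both

assert relevant_rooms({"*"}, {"!a:b", "!c:d"}) == {"!a:b", "!c:d"}
assert relevant_rooms({"!a:b", "!x:y"}, {"!a:b", "!c:d"}) == {"!a:b"}
assert relevant_rooms(None, {"!a:b"}) == set()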
@trace
async def get_to_device_extension_response(
self,
sync_config: SlidingSyncConfig,
to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension,
to_token: StreamToken,
) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]:
"""Handle to-device extension (MSC3885)

Args:
sync_config: Sync configuration
to_device_request: The to-device extension from the request
to_token: The point in the stream to sync up to.
"""
user_id = sync_config.user.to_string()
device_id = sync_config.requester.device_id

# Skip if the extension is not enabled
if not to_device_request.enabled:
return None

# Check that this request has a valid device ID (not all requests have
# to belong to a device, and so device_id is None)
if device_id is None:
return SlidingSyncResult.Extensions.ToDeviceExtension(
next_batch=f"{to_token.to_device_key}",
events=[],
)

since_stream_id = 0
if to_device_request.since is not None:
# We've already validated this is an int.
since_stream_id = int(to_device_request.since)

if to_token.to_device_key < since_stream_id:
# The since token is ahead of our current token, so we return an
# empty response.
logger.warning(
"Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r",
since_stream_id,
to_token.to_device_key,
)
return SlidingSyncResult.Extensions.ToDeviceExtension(
next_batch=to_device_request.since,
events=[],
)

# Delete everything before the given since token, as we know the
# device must have received them.
deleted = await self.store.delete_messages_for_device(
user_id=user_id,
device_id=device_id,
up_to_stream_id=since_stream_id,
)

logger.debug(
"Deleted %d to-device messages up to %d for %s",
deleted,
since_stream_id,
user_id,
)

messages, stream_id = await self.store.get_messages_for_device(
user_id=user_id,
device_id=device_id,
from_stream_id=since_stream_id,
to_stream_id=to_token.to_device_key,
limit=min(to_device_request.limit, 100),  # Limit to at most 100 events
)

return SlidingSyncResult.Extensions.ToDeviceExtension(
next_batch=f"{stream_id}",
events=messages,
)

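The `since` token doubles as an acknowledgement here: by echoing it back, the client proves it saw everything up to that point, so those messages can be deleted before the next batch is fetched. A runnable toy model of that flow (in-memory store standing in for the real one):

import asyncio
from typing import List, Tuple

class InMemoryToDeviceStore:
    """Toy stand-in for the delete/get store calls used above."""
    def __init__(self) -> None:
        self._messages: List[Tuple[int, dict]] = []  # (stream_id, message)

    async def delete_messages_for_device(self, up_to_stream_id: int) -> int:
        before = len(self._messages)
        self._messages = [m for m in self._messages if m[0] > up_to_stream_id]
        return before - len(self._messages)

    async def get_messages_for_device(self, from_stream_id: int, to_stream_id: int):
        batch = [m for m in self._messages if from_stream_id < m[0] <= to_stream_id]
        return [msg for _, msg in batch], (batch[-1][0] if batch else from_stream_id)

async def demo() -> None:
    store = InMemoryToDeviceStore()
    store._messages = [(1, {"a": 1}), (2, {"b": 2}), (3, {"c": 3})]
    # Client sends since=2: it has seen streams 1-2, so delete them ...
    await store.delete_messages_for_device(up_to_stream_id=2)
    # ... and hand back everything newer, plus the next_batch token.
    events, next_batch = await store.get_messages_for_device(2, 3)
    print(events, next_batch)  # [{'c': 3}] 3

asyncio.run(demo())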
@trace
async def get_e2ee_extension_response(
self,
sync_config: SlidingSyncConfig,
e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]:
"""Handle E2EE device extension (MSC3884)

Args:
sync_config: Sync configuration
e2ee_request: The e2ee extension from the request
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""
user_id = sync_config.user.to_string()
device_id = sync_config.requester.device_id

# Skip if the extension is not enabled
if not e2ee_request.enabled:
return None

device_list_updates: Optional[DeviceListUpdates] = None
if from_token is not None:
# TODO: This should take into account the `from_token` and `to_token`
device_list_updates = await self.device_handler.get_user_ids_changed(
user_id=user_id,
from_token=from_token.stream_token,
)

device_one_time_keys_count: Mapping[str, int] = {}
device_unused_fallback_key_types: Sequence[str] = []
if device_id:
# TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
device_one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
device_unused_fallback_key_types = (
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
)

return SlidingSyncResult.Extensions.E2eeExtension(
device_list_updates=device_list_updates,
device_one_time_keys_count=device_one_time_keys_count,
device_unused_fallback_key_types=device_unused_fallback_key_types,
)

@trace
async def get_account_data_extension_response(
self,
sync_config: SlidingSyncConfig,
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]:
"""Handle Account Data extension (MSC3959)

Args:
sync_config: Sync configuration
actual_lists: Sliding window API. A map of list key to list results in the
Sliding Sync response.
actual_room_ids: The actual room IDs in the Sliding Sync response.
account_data_request: The account_data extension from the request
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""
user_id = sync_config.user.to_string()

# Skip if the extension is not enabled
if not account_data_request.enabled:
return None

global_account_data_map: Mapping[str, JsonMapping] = {}
if from_token is not None:
# TODO: This should take into account the `from_token` and `to_token`
global_account_data_map = (
await self.store.get_updated_global_account_data_for_user(
user_id, from_token.stream_token.account_data_key
)
)

have_push_rules_changed = await self.store.have_push_rules_changed_for_user(
user_id, from_token.stream_token.push_rules_key
)
if have_push_rules_changed:
global_account_data_map = dict(global_account_data_map)
# TODO: This should take into account the `from_token` and `to_token`
global_account_data_map[AccountDataTypes.PUSH_RULES] = (
await self.push_rules_handler.push_rules_for_user(sync_config.user)
)
else:
# TODO: This should take into account the `to_token`
all_global_account_data = await self.store.get_global_account_data_for_user(
user_id
)

global_account_data_map = dict(all_global_account_data)
# TODO: This should take into account the `to_token`
global_account_data_map[AccountDataTypes.PUSH_RULES] = (
await self.push_rules_handler.push_rules_for_user(sync_config.user)
)

# Fetch room account data
account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {}
relevant_room_ids = self.find_relevant_room_ids_for_extension(
requested_lists=account_data_request.lists,
requested_room_ids=account_data_request.rooms,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
)
if len(relevant_room_ids) > 0:
if from_token is not None:
# TODO: This should take into account the `from_token` and `to_token`
account_data_by_room_map = (
await self.store.get_updated_room_account_data_for_user(
user_id, from_token.stream_token.account_data_key
)
)
else:
# TODO: This should take into account the `to_token`
account_data_by_room_map = (
await self.store.get_room_account_data_for_user(user_id)
)

# Filter down to the relevant rooms
account_data_by_room_map = {
room_id: account_data_map
for room_id, account_data_map in account_data_by_room_map.items()
if room_id in relevant_room_ids
}

return SlidingSyncResult.Extensions.AccountDataExtension(
global_account_data_map=global_account_data_map,
account_data_by_room_map=account_data_by_room_map,
)

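The branching above splits initial from incremental sync: with no from_token every piece of global account data is returned, otherwise only what changed since it, and in both cases the push-rules entry is overlaid whenever it may have changed. A standalone sketch of that shape (hypothetical helper; "m.push_rules" stands in for AccountDataTypes.PUSH_RULES):

from typing import Dict, Optional

PUSH_RULES = "m.push_rules"

def global_account_data(
    all_data: Dict[str, dict],
    updates_since: Optional[Dict[str, dict]],  # None on initial sync
    push_rules_changed: bool,
    current_push_rules: dict,
) -> Dict[str, dict]:
    if updates_since is None:
        # Initial sync: everything, always including push rules.
        result = dict(all_data)
        result[PUSH_RULES] = current_push_rules
        return result
    # Incremental sync: only the deltas, overlaying push rules if they changed.
    result = dict(updates_since)
    if push_rules_changed:
        result[PUSH_RULES] = current_push_rules
    return result

print(global_account_data({"m.direct": {}}, None, False, {"global": []}))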
@trace
async def get_receipts_extension_response(
self,
sync_config: SlidingSyncConfig,
previous_connection_state: "PerConnectionState",
new_connection_state: "MutablePerConnectionState",
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]:
"""Handle Receipts extension (MSC3960)

Args:
sync_config: Sync configuration
previous_connection_state: The previous per-connection state
new_connection_state: A mutable copy of the per-connection
state, used to record updates to the state.
actual_lists: Sliding window API. A map of list key to list results in the
Sliding Sync response.
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
Sliding Sync response.
receipts_request: The receipts extension from the request
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""
# Skip if the extension is not enabled
if not receipts_request.enabled:
return None

relevant_room_ids = self.find_relevant_room_ids_for_extension(
requested_lists=receipts_request.lists,
requested_room_ids=receipts_request.rooms,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
)

room_id_to_receipt_map: Dict[str, JsonMapping] = {}
if len(relevant_room_ids) > 0:
# We need to handle the different cases depending on if we have sent
# down receipts previously or not, so we split the relevant rooms
# up into different collections based on status.
live_rooms = set()
previously_rooms: Dict[str, MultiWriterStreamToken] = {}
initial_rooms = set()

for room_id in relevant_room_ids:
if not from_token:
initial_rooms.add(room_id)
continue

# If we're sending down the room from scratch again for some
# reason, we should always resend the receipts as well
# (regardless of if we've sent them down before). This is to
# mimic the behaviour of what happens on initial sync, where you
# get a chunk of timeline with all of the corresponding receipts
# for the events in the timeline.
#
# We also resend down receipts when we "expand" the timeline,
# (see the "XXX: Odd behavior" in
# `synapse.handlers.sliding_sync`).
room_result = actual_room_response_map.get(room_id)
if room_result is not None:
if room_result.initial or room_result.unstable_expanded_timeline:
initial_rooms.add(room_id)
continue

room_status = previous_connection_state.receipts.have_sent_room(room_id)
if room_status.status == HaveSentRoomFlag.LIVE:
live_rooms.add(room_id)
elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
assert room_status.last_token is not None
previously_rooms[room_id] = room_status.last_token
elif room_status.status == HaveSentRoomFlag.NEVER:
initial_rooms.add(room_id)
else:
assert_never(room_status.status)

# The set of receipts that we fetched. Private receipts need to be
# filtered out before returning.
fetched_receipts = []

# For live rooms we just fetch all receipts in those rooms since the
# `since` token.
if live_rooms:
assert from_token is not None
receipts = await self.store.get_linearized_receipts_for_rooms(
room_ids=live_rooms,
from_key=from_token.stream_token.receipt_key,
to_key=to_token.receipt_key,
)
fetched_receipts.extend(receipts)

# For rooms we've previously sent down, but aren't up to date, we
# need to use the from token from the room status.
if previously_rooms:
for room_id, receipt_token in previously_rooms.items():
# TODO: Limit the number of receipts we're about to send down
# for the room, if it's too many we should TODO
previously_receipts = (
await self.store.get_linearized_receipts_for_room(
room_id=room_id,
from_key=receipt_token,
to_key=to_token.receipt_key,
)
)
fetched_receipts.extend(previously_receipts)

if initial_rooms:
# We also always send down receipts for the current user.
user_receipts = (
await self.store.get_linearized_receipts_for_user_in_rooms(
user_id=sync_config.user.to_string(),
room_ids=initial_rooms,
to_key=to_token.receipt_key,
)
)

# For rooms we haven't previously sent down, we could send all receipts
# from that room but we only want to include receipts for events
# in the timeline to avoid bloating and blowing up the sync response
# as the number of users in the room increases. (this behavior is part of the spec)
initial_rooms_and_event_ids = [
(room_id, event.event_id)
for room_id in initial_rooms
if room_id in actual_room_response_map
for event in actual_room_response_map[room_id].timeline_events
]
initial_receipts = await self.store.get_linearized_receipts_for_events(
room_and_event_ids=initial_rooms_and_event_ids,
)

# Combine the receipts for a room and add them to
# `fetched_receipts`
for room_id in initial_receipts.keys() | user_receipts.keys():
receipt_content = ReceiptInRoom.merge_to_content(
list(
itertools.chain(
initial_receipts.get(room_id, []),
user_receipts.get(room_id, []),
)
)
)

fetched_receipts.append(
{
"room_id": room_id,
"type": EduTypes.RECEIPT,
"content": receipt_content,
}
)

fetched_receipts = ReceiptEventSource.filter_out_private_receipts(
fetched_receipts, sync_config.user.to_string()
)

for receipt in fetched_receipts:
# These fields should exist for every receipt
room_id = receipt["room_id"]
type = receipt["type"]
content = receipt["content"]

room_id_to_receipt_map[room_id] = {"type": type, "content": content}

# Now we update the per-connection state to track which receipts we have
# and haven't sent down.
new_connection_state.receipts.record_sent_rooms(relevant_room_ids)

if from_token:
# Now find the set of rooms that may have receipts that we're not sending
# down. We only need to check rooms that we have previously returned
# receipts for (in `previous_connection_state`) because we only care about
# updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just
# stay pointing at their previous position so we don't need to waste time
# checking those and since we default to `NEVER`, rooms that were `NEVER`
# sent before don't need to be recorded as we'll handle them correctly when
# they come into range for the first time.
rooms_no_receipts = [
room_id
for room_id, room_status in previous_connection_state.receipts._statuses.items()
if room_status.status == HaveSentRoomFlag.LIVE
and room_id not in relevant_room_ids
]
changed_rooms = await self.store.get_rooms_with_receipts_between(
rooms_no_receipts,
from_key=from_token.stream_token.receipt_key,
to_key=to_token.receipt_key,
)
new_connection_state.receipts.record_unsent_rooms(
changed_rooms, from_token.stream_token.receipt_key
)

return SlidingSyncResult.Extensions.ReceiptsExtension(
room_id_to_receipt_map=room_id_to_receipt_map,
)

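The LIVE / PREVIOUSLY / NEVER bookkeeping above decides, per room, which receipts the next request needs. A runnable toy model of that per-connection tracking (hypothetical names; plain ints stand in for MultiWriterStreamToken):

import enum
from typing import Dict, Iterable, Optional, Tuple

class Flag(enum.Enum):
    LIVE = "live"              # up to date: fetch receipts since the request's from_token
    PREVIOUSLY = "previously"  # sent before but fell behind: fetch since the room's own token
    NEVER = "never"            # never sent: treat as initial, send receipts for timeline events

class ReceiptTracker:
    def __init__(self) -> None:
        # room_id -> (flag, token receipts were last sent up to)
        self._statuses: Dict[str, Tuple[Flag, Optional[int]]] = {}

    def have_sent_room(self, room_id: str) -> Tuple[Flag, Optional[int]]:
        return self._statuses.get(room_id, (Flag.NEVER, None))

    def record_sent_rooms(self, room_ids: Iterable[str]) -> None:
        # Rooms in this response are now fully up to date.
        for room_id in room_ids:
            self._statuses[room_id] = (Flag.LIVE, None)

    def record_unsent_rooms(self, room_ids: Iterable[str], from_token: int) -> None:
        # Rooms with new receipts we did NOT send: remember where we left off.
        for room_id in room_ids:
            self._statuses[room_id] = (Flag.PREVIOUSLY, from_token)

tracker = ReceiptTracker()
tracker.record_sent_rooms(["!a:x"])
tracker.record_unsent_rooms(["!b:x"], from_token=42)
print(tracker.have_sent_room("!a:x"))  # (Flag.LIVE, None)
print(tracker.have_sent_room("!b:x"))  # (Flag.PREVIOUSLY, 42)
print(tracker.have_sent_room("!c:x"))  # (Flag.NEVER, None)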
async def get_typing_extension_response(
self,
sync_config: SlidingSyncConfig,
actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
actual_room_ids: Set[str],
actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
typing_request: SlidingSyncConfig.Extensions.TypingExtension,
to_token: StreamToken,
from_token: Optional[SlidingSyncStreamToken],
) -> Optional[SlidingSyncResult.Extensions.TypingExtension]:
"""Handle Typing Notification extension (MSC3961)

Args:
sync_config: Sync configuration
actual_lists: Sliding window API. A map of list key to list results in the
Sliding Sync response.
actual_room_ids: The actual room IDs in the Sliding Sync response.
actual_room_response_map: A map of room ID to room results in the
Sliding Sync response.
typing_request: The typing extension from the request
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from.
"""
# Skip if the extension is not enabled
if not typing_request.enabled:
return None

relevant_room_ids = self.find_relevant_room_ids_for_extension(
requested_lists=typing_request.lists,
requested_room_ids=typing_request.rooms,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
)

room_id_to_typing_map: Dict[str, JsonMapping] = {}
if len(relevant_room_ids) > 0:
# Note: We don't need to take connection tracking into account for typing
# notifications because, when a room comes back into range, the client will
# get anything that is still relevant and hasn't timed out. We treat any gap
# where the room fell out of range as long enough for typing notifications
# to have timed out (it's not worth the ~30 seconds of data we may have missed).
typing_source = self.event_sources.sources.typing
typing_notifications, _ = await typing_source.get_new_events(
user=sync_config.user,
from_key=(from_token.stream_token.typing_key if from_token else 0),
to_key=to_token.typing_key,
# This is a dummy value and isn't used in the function
limit=0,
room_ids=relevant_room_ids,
is_guest=False,
)

for typing_notification in typing_notifications:
# These fields should exist for every typing notification
room_id = typing_notification["room_id"]
type = typing_notification["type"]
content = typing_notification["content"]

room_id_to_typing_map[room_id] = {"type": type, "content": content}

return SlidingSyncResult.Extensions.TypingExtension(
room_id_to_typing_map=room_id_to_typing_map,
)
synapse/handlers/sliding_sync/room_lists.py (new file, 1353 lines): diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.