Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: devon/clea... → s7evink/va... (35 commits)
| SHA1 |
|---|
| ee9768c31f |
| fc4e3f3e3c |
| 29fe51b293 |
| a0c6243798 |
| c284d8cb24 |
| 5fff5a1893 |
| 765817a1ad |
| 396de6544a |
| d1c96ee0f2 |
| 0eaf28fa92 |
| 88bc4bb67e |
| ca0c87c504 |
| 0d4a08103c |
| b61527b0d8 |
| 5adb08f3c9 |
| 2aab171042 |
| 0aeb95fb07 |
| 72020f3f2c |
| ad8dcc2119 |
| 84e1d15232 |
| 2b7a398b14 |
| 81848e8193 |
| be3ecb332a |
| 14c114b9fd |
| 2eb6239ad8 |
| 26583f8623 |
| 265e5fe384 |
| 34d6eba694 |
| 938536186c |
| 9c2d8fd6dd |
| 75a45e9ce6 |
| f4c17c5a38 |
| 9d2cd9fe63 |
| 67d516d2a4 |
| 3c0c30ad7d |
.github/workflows/docker.yml (vendored, 8 changes)

```diff
@@ -41,13 +41,13 @@ jobs:
           echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
 
       - name: Log in to DockerHub
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
      - name: Log in to GHCR
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
@@ -102,14 +102,14 @@ jobs:
          merge-multiple: true
 
      - name: Log in to DockerHub
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        if: ${{ startsWith(matrix.repository, 'docker.io') }}
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
      - name: Log in to GHCR
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        if: ${{ startsWith(matrix.repository, 'ghcr.io') }}
        with:
          registry: ghcr.io
```
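As an aside, the `SYNAPSE_VERSION` context line in the first hunk derives the version string from `pyproject.toml` with a grep/sed pipeline. A rough Python equivalent of that pipeline, assuming it is run from the repository root, might look like this (this is an illustrative sketch, not code from the diff):

```python
# Rough Python equivalent of the workflow's grep/sed pipeline: find the
# `version = "..."` line in pyproject.toml and print just the value.
import re

with open("pyproject.toml", encoding="utf-8") as f:
    for line in f:
        match = re.match(r'version\s*=\s*"([^"]*)"', line)  # grep "^version" + sed capture
        if match:
            print(f"SYNAPSE_VERSION={match.group(1)}")  # e.g. SYNAPSE_VERSION=1.139.0
            break
```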
.github/workflows/push_complement_image.yml (vendored, 2 changes)

```diff
@@ -48,7 +48,7 @@ jobs:
        with:
          ref: master
      - name: Login to registry
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
```
.github/workflows/release-artifacts.yml (vendored, 2 changes)

```diff
@@ -66,7 +66,7 @@ jobs:
          install: true
 
      - name: Set up docker layer caching
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
```
.github/workflows/tests.yml (vendored, 2 changes)

```diff
@@ -174,7 +174,7 @@ jobs:
      # Cribbed from
      # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
      - name: Restore/persist mypy's cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
        with:
          path: |
            .mypy_cache
```
CHANGES.md (15 changes)

```diff
@@ -1,3 +1,18 @@
+# Synapse 1.139.0 (2025-09-30)
+
+### `/register` requests from old application service implementations may break when using MAS
+
+If you are using Matrix Authentication Service (MAS), as of this release any
+Application Services that do not set `inhibit_login=true` when calling `POST
+/_matrix/client/v3/register` will receive the error
+`IO.ELEMENT.MSC4190.M_APPSERVICE_LOGIN_UNSUPPORTED` in response. Please see [the
+upgrade
+notes](https://element-hq.github.io/synapse/develop/upgrade.html#register-requests-from-old-application-service-implementations-may-break-when-using-mas)
+for more information.
+
+No significant changes since 1.139.0rc3.
+
+
 # Synapse 1.139.0rc3 (2025-09-25)
 
 ## Bugfixes
```
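To illustrate the behaviour described in the release note above, here is a minimal sketch (not part of the diff) of an application service registering a user with `inhibit_login=true`; the homeserver URL, access token, and username below are placeholders:

```python
# Hypothetical appservice registration call. The URL, token and username are
# placeholders. Without "inhibit_login": True, a MAS-backed homeserver now
# responds with IO.ELEMENT.MSC4190.M_APPSERVICE_LOGIN_UNSUPPORTED.
import requests

resp = requests.post(
    "https://homeserver.example/_matrix/client/v3/register",
    params={"kind": "user"},
    headers={"Authorization": "Bearer <as_token>"},
    json={
        "type": "m.login.application_service",
        "username": "_examplebridge_alice",
        "inhibit_login": True,  # ask the server not to create a device/access token
    },
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # contains the user_id; no access_token when login is inhibited
```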
Cargo.lock (generated, 20 changes)

```diff
@@ -1062,9 +1062,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.11.2"
+version = "1.11.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
+checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1074,9 +1074,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.9"
+version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1250,9 +1250,9 @@ dependencies = [
 
 [[package]]
 name = "serde"
-version = "1.0.226"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd"
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
 dependencies = [
  "serde_core",
  "serde_derive",
@@ -1260,18 +1260,18 @@ dependencies = [
 
 [[package]]
 name = "serde_core"
-version = "1.0.226"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.226"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
 dependencies = [
  "proc-macro2",
  "quote",
```
changelog.d/17097.misc (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Extend validation of uploaded device keys.
```

changelog.d/18828.feature (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Cleanly shutdown `SynapseHomeServer` object.
```

changelog.d/18988.misc (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Remove internal `ReplicationUploadKeysForUserRestServlet` as a follow-up to the work in https://github.com/element-hq/synapse/pull/18581 that moved device changes off the main process.
```

changelog.d/18992.misc (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Remove `MockClock()` in tests.
```

changelog.d/18998.doc (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Fix documentation for `rc_room_creation` and `rc_reports` to clarify that a `per_user` rate limit is not supported.
```
debian/changelog (vendored, 38 changes)

```diff
@@ -1,4 +1,16 @@
-matrix-synapse-py3 (1.139.0~rc3+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.139.0) stable; urgency=medium
+
+  * New Synapse release 1.139.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Sep 2025 11:58:55 +0100
+
+matrix-synapse-py3 (1.139.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.139.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Sep 2025 12:13:23 +0100
+
+matrix-synapse-py3 (1.138.2) stable; urgency=medium
 
   * The licensing specifier has been updated to add an optional
     `LicenseRef-Element-Commercial` license. The code was already licensed in
@@ -6,11 +18,11 @@ matrix-synapse-py3 (1.139.0~rc3+nmu1) UNRELEASED; urgency=medium
 
  -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Sep 2025 12:17:17 +0100
 
-matrix-synapse-py3 (1.139.0~rc3) stable; urgency=medium
+matrix-synapse-py3 (1.138.1) stable; urgency=medium
 
-  * New Synapse release 1.139.0rc3.
+  * New Synapse release 1.138.1.
 
- -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Sep 2025 12:13:23 +0100
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 24 Sep 2025 11:32:38 +0100
 
 matrix-synapse-py3 (1.139.0~rc2) stable; urgency=medium
 
@@ -24,24 +36,12 @@ matrix-synapse-py3 (1.139.0~rc1) stable; urgency=medium
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 23 Sep 2025 13:24:50 +0100
 
-matrix-synapse-py3 (1.138.2) stable; urgency=medium
-
-  * New Synapse release 1.138.2.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 24 Sep 2025 12:26:16 +0100
-
-matrix-synapse-py3 (1.138.1) stable; urgency=medium
-
-  * New Synapse release 1.138.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 24 Sep 2025 11:32:38 +0100
-
 matrix-synapse-py3 (1.138.0) stable; urgency=medium
 
   * New Synapse release 1.138.0.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 09 Sep 2025 11:21:25 +0100
 
 matrix-synapse-py3 (1.138.0~rc1) stable; urgency=medium
 
   * New synapse release 1.138.0rc1.
```
Configuration documentation changes for `rc_reports` and `rc_room_creation` (see changelog.d/18998.doc above):

````diff
@@ -2006,9 +2006,8 @@ This setting has the following sub-options:
 
 Default configuration:
 ```yaml
 rc_reports:
-  per_user:
-    per_second: 1.0
-    burst_count: 5.0
+  per_second: 1.0
+  burst_count: 5.0
 ```
 
 Example configuration:
@@ -2031,9 +2030,8 @@ This setting has the following sub-options:
 
 Default configuration:
 ```yaml
 rc_room_creation:
-  per_user:
-    per_second: 0.016
-    burst_count: 10.0
+  per_second: 0.016
+  burst_count: 10.0
 ```
 
 Example configuration:
````
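For intuition about what a `per_second`/`burst_count` pair means, the sketch below models it as a standard token bucket; this is an illustrative simplification, not Synapse's actual `Ratelimiter` implementation:

```python
# Illustrative token-bucket model of a per_second/burst_count pair, e.g.
# rc_room_creation's defaults (per_second=0.016, burst_count=10.0): up to
# 10 actions in a burst, then roughly one more per minute. Simplified
# sketch only; not Synapse's real Ratelimiter.
import time

class TokenBucket:
    def __init__(self, per_second: float, burst_count: float) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill tokens at `per_second`, capped at `burst_count`.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.per_second
        )
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False

limiter = TokenBucket(per_second=0.016, burst_count=10.0)
print(sum(limiter.allow() for _ in range(12)))  # 10 allowed, 2 rejected
```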
poetry.lock (generated, 370 changes)

```diff
@@ -68,63 +68,75 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
 
 [[package]]
 name = "bcrypt"
-version = "4.3.0"
+version = "5.0.0"
 description = "Modern password hashing for your software and your servers"
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
 files = [
-    {file = "bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4"},
-    {file = "bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669"},
-    {file = "bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb"},
-    {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d"},
-    {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f"},
-    {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732"},
-    {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef"},
-    {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304"},
-    {file = "bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51"},
-    {file = "bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62"},
-    {file = "bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe"},
-    {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0"},
-    {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f"},
-    {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23"},
-    {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe"},
-    {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505"},
-    {file = "bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a"},
-    {file = "bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b"},
-    {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1"},
-    {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d"},
-    {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492"},
-    {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90"},
-    {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a"},
-    {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce"},
-    {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8"},
-    {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938"},
-    {file = "bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f3c08197f3039bec79cee59a606d62b96b16669cff3949f21e74796b6e3cd2be"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:200af71bc25f22006f4069060c88ed36f8aa4ff7f53e67ff04d2ab3f1e79a5b2"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:baade0a5657654c2984468efb7d6c110db87ea63ef5a4b54732e7e337253e44f"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c58b56cdfb03202b3bcc9fd8daee8e8e9b6d7e3163aa97c631dfcfcc24d36c86"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4bfd2a34de661f34d0bda43c3e4e79df586e4716ef401fe31ea39d69d581ef23"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ed2e1365e31fc73f1825fa830f1c8f8917ca1b3ca6185773b349c20fd606cec2"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:83e787d7a84dbbfba6f250dd7a5efd689e935f03dd83b0f919d39349e1f23f83"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:137c5156524328a24b9fac1cb5db0ba618bc97d11970b39184c1d87dc4bf1746"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:38cac74101777a6a7d3b3e3cfefa57089b5ada650dce2baf0cbdd9d65db22a9e"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:d8d65b564ec849643d9f7ea05c6d9f0cd7ca23bdd4ac0c2dbef1104ab504543d"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:741449132f64b3524e95cd30e5cd3343006ce146088f074f31ab26b94e6c75ba"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:212139484ab3207b1f0c00633d3be92fef3c5f0af17cad155679d03ff2ee1e41"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-win32.whl", hash = "sha256:9d52ed507c2488eddd6a95bccee4e808d3234fa78dd370e24bac65a21212b861"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f6984a24db30548fd39a44360532898c33528b74aedf81c26cf29c51ee47057e"},
+    {file = "bcrypt-5.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9fffdb387abe6aa775af36ef16f55e318dcda4194ddbf82007a6f21da29de8f5"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-macosx_10_12_universal2.whl", hash = "sha256:4870a52610537037adb382444fefd3706d96d663ac44cbb2f37e3919dca3d7ef"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48f753100931605686f74e27a7b49238122aa761a9aefe9373265b8b7aa43ea4"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f70aadb7a809305226daedf75d90379c397b094755a710d7014b8b117df1ebbf"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:744d3c6b164caa658adcb72cb8cc9ad9b4b75c7db507ab4bc2480474a51989da"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a28bc05039bdf3289d757f49d616ab3efe8cf40d8e8001ccdd621cd4f98f4fc9"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7f277a4b3390ab4bebe597800a90da0edae882c6196d3038a73adf446c4f969f"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:79cfa161eda8d2ddf29acad370356b47f02387153b11d46042e93a0a95127493"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a5393eae5722bcef046a990b84dff02b954904c36a194f6cfc817d7dca6c6f0b"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7f4c94dec1b5ab5d522750cb059bb9409ea8872d4494fd152b53cca99f1ddd8c"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0cae4cb350934dfd74c020525eeae0a5f79257e8a201c0c176f4b84fdbf2a4b4"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-win32.whl", hash = "sha256:b17366316c654e1ad0306a6858e189fc835eca39f7eb2cafd6aaca8ce0c40a2e"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:92864f54fb48b4c718fc92a32825d0e42265a627f956bc0361fe869f1adc3e7d"},
+    {file = "bcrypt-5.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dd19cf5184a90c873009244586396a6a884d591a5323f0e8a5922560718d4993"},
+    {file = "bcrypt-5.0.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:fc746432b951e92b58317af8e0ca746efe93e66555f1b40888865ef5bf56446b"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c2388ca94ffee269b6038d48747f4ce8df0ffbea43f31abfa18ac72f0218effb"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:560ddb6ec730386e7b3b26b8b4c88197aaed924430e7b74666a586ac997249ef"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d79e5c65dcc9af213594d6f7f1fa2c98ad3fc10431e7aa53c176b441943efbdd"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2b732e7d388fa22d48920baa267ba5d97cca38070b69c0e2d37087b381c681fd"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0c8e093ea2532601a6f686edbc2c6b2ec24131ff5c52f7610dd64fa4553b5464"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5b1589f4839a0899c146e8892efe320c0fa096568abd9b95593efac50a87cb75"},
+    {file = "bcrypt-5.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:89042e61b5e808b67daf24a434d89bab164d4de1746b37a8d173b6b14f3db9ff"},
+    {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:e3cf5b2560c7b5a142286f69bde914494b6d8f901aaa71e453078388a50881c4"},
+    {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f632fd56fc4e61564f78b46a2269153122db34988e78b6be8b32d28507b7eaeb"},
+    {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:801cad5ccb6b87d1b430f183269b94c24f248dddbbc5c1f78b6ed231743e001c"},
+    {file = "bcrypt-5.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3cf67a804fc66fc217e6914a5635000259fbbbb12e78a99488e4d5ba445a71eb"},
+    {file = "bcrypt-5.0.0-cp38-abi3-win32.whl", hash = "sha256:3abeb543874b2c0524ff40c57a4e14e5d3a66ff33fb423529c88f180fd756538"},
+    {file = "bcrypt-5.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:35a77ec55b541e5e583eb3436ffbbf53b0ffa1fa16ca6782279daf95d146dcd9"},
+    {file = "bcrypt-5.0.0-cp38-abi3-win_arm64.whl", hash = "sha256:cde08734f12c6a4e28dc6755cd11d3bdfea608d93d958fffbe95a7026ebe4980"},
+    {file = "bcrypt-5.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0c418ca99fd47e9c59a301744d63328f17798b5947b0f791e9af3c1c499c2d0a"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddb4e1500f6efdd402218ffe34d040a1196c072e07929b9820f363a1fd1f4191"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7aeef54b60ceddb6f30ee3db090351ecf0d40ec6e2abf41430997407a46d2254"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f0ce778135f60799d89c9693b9b398819d15f1921ba15fe719acb3178215a7db"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a71f70ee269671460b37a449f5ff26982a6f2ba493b3eabdd687b4bf35f875ac"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8429e1c410b4073944f03bd778a9e066e7fad723564a52ff91841d278dfc822"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:edfcdcedd0d0f05850c52ba3127b1fce70b9f89e0fe5ff16517df7e81fa3cbb8"},
+    {file = "bcrypt-5.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:611f0a17aa4a25a69362dcc299fda5c8a3d4f160e2abb3831041feb77393a14a"},
+    {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:db99dca3b1fdc3db87d7c57eac0c82281242d1eabf19dcb8a6b10eb29a2e72d1"},
+    {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:5feebf85a9cefda32966d8171f5db7e3ba964b77fdfe31919622256f80f9cf42"},
+    {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3ca8a166b1140436e058298a34d88032ab62f15aae1c598580333dc21d27ef10"},
+    {file = "bcrypt-5.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:61afc381250c3182d9078551e3ac3a41da14154fbff647ddf52a769f588c4172"},
+    {file = "bcrypt-5.0.0-cp39-abi3-win32.whl", hash = "sha256:64d7ce196203e468c457c37ec22390f1a61c85c6f0b8160fd752940ccfb3a683"},
+    {file = "bcrypt-5.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:64ee8434b0da054d830fa8e89e1c8bf30061d539044a39524ff7dec90481e5c2"},
+    {file = "bcrypt-5.0.0-cp39-abi3-win_arm64.whl", hash = "sha256:f2347d3534e76bf50bca5500989d6c1d05ed64b440408057a37673282c654927"},
+    {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7edda91d5ab52b15636d9c30da87d2cc84f426c72b9dba7a9b4fe142ba11f534"},
+    {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:046ad6db88edb3c5ece4369af997938fb1c19d6a699b9c1b27b0db432faae4c4"},
+    {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dcd58e2b3a908b5ecc9b9df2f0085592506ac2d5110786018ee5e160f28e0911"},
+    {file = "bcrypt-5.0.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:6b8f520b61e8781efee73cba14e3e8c9556ccfb375623f4f97429544734545b4"},
+    {file = "bcrypt-5.0.0.tar.gz", hash = "sha256:f748f7c2d6fd375cc93d3fba7ef4a9e3a092421b8dbf34d8d4dc06be9492dfdd"},
 ]
 
 [package.extras]
```
```diff
@@ -997,107 +1009,153 @@ pyasn1 = ">=0.4.6"
 
 [[package]]
 name = "lxml"
-version = "6.0.0"
+version = "6.0.2"
 description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
 optional = true
 python-versions = ">=3.8"
 groups = ["main"]
 markers = "extra == \"all\" or extra == \"url-preview\""
 files = [
-    {file = "lxml-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:35bc626eec405f745199200ccb5c6b36f202675d204aa29bb52e27ba2b71dea8"},
-    {file = "lxml-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:246b40f8a4aec341cbbf52617cad8ab7c888d944bfe12a6abd2b1f6cfb6f6082"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:2793a627e95d119e9f1e19720730472f5543a6d84c50ea33313ce328d870f2dd"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:46b9ed911f36bfeb6338e0b482e7fe7c27d362c52fde29f221fddbc9ee2227e7"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2b4790b558bee331a933e08883c423f65bbcd07e278f91b2272489e31ab1e2b4"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2030956cf4886b10be9a0285c6802e078ec2391e1dd7ff3eb509c2c95a69b76"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d23854ecf381ab1facc8f353dcd9adeddef3652268ee75297c1164c987c11dc"},
-    {file = "lxml-6.0.0-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:43fe5af2d590bf4691531b1d9a2495d7aab2090547eaacd224a3afec95706d76"},
-    {file = "lxml-6.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74e748012f8c19b47f7d6321ac929a9a94ee92ef12bc4298c47e8b7219b26541"},
-    {file = "lxml-6.0.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:43cfbb7db02b30ad3926e8fceaef260ba2fb7df787e38fa2df890c1ca7966c3b"},
-    {file = "lxml-6.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:34190a1ec4f1e84af256495436b2d196529c3f2094f0af80202947567fdbf2e7"},
-    {file = "lxml-6.0.0-cp310-cp310-win32.whl", hash = "sha256:5967fe415b1920a3877a4195e9a2b779249630ee49ece22021c690320ff07452"},
-    {file = "lxml-6.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:f3389924581d9a770c6caa4df4e74b606180869043b9073e2cec324bad6e306e"},
-    {file = "lxml-6.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:522fe7abb41309e9543b0d9b8b434f2b630c5fdaf6482bee642b34c8c70079c8"},
-    {file = "lxml-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ee56288d0df919e4aac43b539dd0e34bb55d6a12a6562038e8d6f3ed07f9e36"},
-    {file = "lxml-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8dd6dd0e9c1992613ccda2bcb74fc9d49159dbe0f0ca4753f37527749885c25"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:d7ae472f74afcc47320238b5dbfd363aba111a525943c8a34a1b657c6be934c3"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5592401cdf3dc682194727c1ddaa8aa0f3ddc57ca64fd03226a430b955eab6f6"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58ffd35bd5425c3c3b9692d078bf7ab851441434531a7e517c4984d5634cd65b"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f720a14aa102a38907c6d5030e3d66b3b680c3e6f6bc95473931ea3c00c59967"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2a5e8d207311a0170aca0eb6b160af91adc29ec121832e4ac151a57743a1e1e"},
-    {file = "lxml-6.0.0-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:2dd1cc3ea7e60bfb31ff32cafe07e24839df573a5e7c2d33304082a5019bcd58"},
-    {file = "lxml-6.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cfcf84f1defed7e5798ef4f88aa25fcc52d279be731ce904789aa7ccfb7e8d2"},
-    {file = "lxml-6.0.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a52a4704811e2623b0324a18d41ad4b9fabf43ce5ff99b14e40a520e2190c851"},
-    {file = "lxml-6.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c16304bba98f48a28ae10e32a8e75c349dd742c45156f297e16eeb1ba9287a1f"},
-    {file = "lxml-6.0.0-cp311-cp311-win32.whl", hash = "sha256:f8d19565ae3eb956d84da3ef367aa7def14a2735d05bd275cd54c0301f0d0d6c"},
-    {file = "lxml-6.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b2d71cdefda9424adff9a3607ba5bbfc60ee972d73c21c7e3c19e71037574816"},
-    {file = "lxml-6.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:8a2e76efbf8772add72d002d67a4c3d0958638696f541734304c7f28217a9cab"},
-    {file = "lxml-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78718d8454a6e928470d511bf8ac93f469283a45c354995f7d19e77292f26108"},
-    {file = "lxml-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:84ef591495ffd3f9dcabffd6391db7bb70d7230b5c35ef5148354a134f56f2be"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:2930aa001a3776c3e2601cb8e0a15d21b8270528d89cc308be4843ade546b9ab"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:219e0431ea8006e15005767f0351e3f7f9143e793e58519dc97fe9e07fae5563"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bd5913b4972681ffc9718bc2d4c53cde39ef81415e1671ff93e9aa30b46595e7"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:390240baeb9f415a82eefc2e13285016f9c8b5ad71ec80574ae8fa9605093cd7"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d6e200909a119626744dd81bae409fc44134389e03fbf1d68ed2a55a2fb10991"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ca50bd612438258a91b5b3788c6621c1f05c8c478e7951899f492be42defc0da"},
-    {file = "lxml-6.0.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:c24b8efd9c0f62bad0439283c2c795ef916c5a6b75f03c17799775c7ae3c0c9e"},
-    {file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:afd27d8629ae94c5d863e32ab0e1d5590371d296b87dae0a751fb22bf3685741"},
-    {file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:54c4855eabd9fc29707d30141be99e5cd1102e7d2258d2892314cf4c110726c3"},
-    {file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c907516d49f77f6cd8ead1322198bdfd902003c3c330c77a1c5f3cc32a0e4d16"},
-    {file = "lxml-6.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36531f81c8214e293097cd2b7873f178997dae33d3667caaae8bdfb9666b76c0"},
-    {file = "lxml-6.0.0-cp312-cp312-win32.whl", hash = "sha256:690b20e3388a7ec98e899fd54c924e50ba6693874aa65ef9cb53de7f7de9d64a"},
-    {file = "lxml-6.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:310b719b695b3dd442cdfbbe64936b2f2e231bb91d998e99e6f0daf991a3eba3"},
-    {file = "lxml-6.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:8cb26f51c82d77483cdcd2b4a53cda55bbee29b3c2f3ddeb47182a2a9064e4eb"},
-    {file = "lxml-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6da7cd4f405fd7db56e51e96bff0865b9853ae70df0e6720624049da76bde2da"},
-    {file = "lxml-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b34339898bb556a2351a1830f88f751679f343eabf9cf05841c95b165152c9e7"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:51a5e4c61a4541bd1cd3ba74766d0c9b6c12d6a1a4964ef60026832aac8e79b3"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d18a25b19ca7307045581b18b3ec9ead2b1db5ccd8719c291f0cd0a5cec6cb81"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d4f0c66df4386b75d2ab1e20a489f30dc7fd9a06a896d64980541506086be1f1"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f4b481b6cc3a897adb4279216695150bbe7a44c03daba3c894f49d2037e0a24"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8a78d6c9168f5bcb20971bf3329c2b83078611fbe1f807baadc64afc70523b3a"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae06fbab4f1bb7db4f7c8ca9897dc8db4447d1a2b9bee78474ad403437bcc29"},
-    {file = "lxml-6.0.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:1fa377b827ca2023244a06554c6e7dc6828a10aaf74ca41965c5d8a4925aebb4"},
-    {file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1676b56d48048a62ef77a250428d1f31f610763636e0784ba67a9740823988ca"},
-    {file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:0e32698462aacc5c1cf6bdfebc9c781821b7e74c79f13e5ffc8bfe27c42b1abf"},
-    {file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4d6036c3a296707357efb375cfc24bb64cd955b9ec731abf11ebb1e40063949f"},
-    {file = "lxml-6.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7488a43033c958637b1a08cddc9188eb06d3ad36582cebc7d4815980b47e27ef"},
-    {file = "lxml-6.0.0-cp313-cp313-win32.whl", hash = "sha256:5fcd7d3b1d8ecb91445bd71b9c88bdbeae528fefee4f379895becfc72298d181"},
-    {file = "lxml-6.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:2f34687222b78fff795feeb799a7d44eca2477c3d9d3a46ce17d51a4f383e32e"},
-    {file = "lxml-6.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:21db1ec5525780fd07251636eb5f7acb84003e9382c72c18c542a87c416ade03"},
-    {file = "lxml-6.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4eb114a0754fd00075c12648d991ec7a4357f9cb873042cc9a77bf3a7e30c9db"},
-    {file = "lxml-6.0.0-cp38-cp38-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:7da298e1659e45d151b4028ad5c7974917e108afb48731f4ed785d02b6818994"},
-    {file = "lxml-6.0.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7bf61bc4345c1895221357af8f3e89f8c103d93156ef326532d35c707e2fb19d"},
-    {file = "lxml-6.0.0-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63b634facdfbad421d4b61c90735688465d4ab3a8853ac22c76ccac2baf98d97"},
-    {file = "lxml-6.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e380e85b93f148ad28ac15f8117e2fd8e5437aa7732d65e260134f83ce67911b"},
-    {file = "lxml-6.0.0-cp38-cp38-win32.whl", hash = "sha256:185efc2fed89cdd97552585c624d3c908f0464090f4b91f7d92f8ed2f3b18f54"},
-    {file = "lxml-6.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:f97487996a39cb18278ca33f7be98198f278d0bc3c5d0fd4d7b3d63646ca3c8a"},
-    {file = "lxml-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85b14a4689d5cff426c12eefe750738648706ea2753b20c2f973b2a000d3d261"},
-    {file = "lxml-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f64ccf593916e93b8d36ed55401bb7fe9c7d5de3180ce2e10b08f82a8f397316"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:b372d10d17a701b0945f67be58fae4664fd056b85e0ff0fbc1e6c951cdbc0512"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a674c0948789e9136d69065cc28009c1b1874c6ea340253db58be7622ce6398f"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:edf6e4c8fe14dfe316939711e3ece3f9a20760aabf686051b537a7562f4da91a"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:048a930eb4572829604982e39a0c7289ab5dc8abc7fc9f5aabd6fbc08c154e93"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0b5fa5eda84057a4f1bbb4bb77a8c28ff20ae7ce211588d698ae453e13c6281"},
-    {file = "lxml-6.0.0-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:c352fc8f36f7e9727db17adbf93f82499457b3d7e5511368569b4c5bd155a922"},
-    {file = "lxml-6.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8db5dc617cb937ae17ff3403c3a70a7de9df4852a046f93e71edaec678f721d0"},
-    {file = "lxml-6.0.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:2181e4b1d07dde53986023482673c0f1fba5178ef800f9ab95ad791e8bdded6a"},
-    {file = "lxml-6.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3c98d5b24c6095e89e03d65d5c574705be3d49c0d8ca10c17a8a4b5201b72f5"},
-    {file = "lxml-6.0.0-cp39-cp39-win32.whl", hash = "sha256:04d67ceee6db4bcb92987ccb16e53bef6b42ced872509f333c04fb58a3315256"},
-    {file = "lxml-6.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:e0b1520ef900e9ef62e392dd3d7ae4f5fa224d1dd62897a792cf353eb20b6cae"},
-    {file = "lxml-6.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:e35e8aaaf3981489f42884b59726693de32dabfc438ac10ef4eb3409961fd402"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:dbdd7679a6f4f08152818043dbb39491d1af3332128b3752c3ec5cebc0011a72"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:40442e2a4456e9910875ac12951476d36c0870dcb38a68719f8c4686609897c4"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:db0efd6bae1c4730b9c863fc4f5f3c0fa3e8f05cae2c44ae141cb9dfc7d091dc"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9ab542c91f5a47aaa58abdd8ea84b498e8e49fe4b883d67800017757a3eb78e8"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:013090383863b72c62a702d07678b658fa2567aa58d373d963cca245b017e065"},
-    {file = "lxml-6.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c86df1c9af35d903d2b52d22ea3e66db8058d21dc0f59842ca5deb0595921141"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4337e4aec93b7c011f7ee2e357b0d30562edd1955620fdd4aeab6aacd90d43c5"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ae74f7c762270196d2dda56f8dd7309411f08a4084ff2dfcc0b095a218df2e06"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:059c4cbf3973a621b62ea3132934ae737da2c132a788e6cfb9b08d63a0ef73f9"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:17f090a9bc0ce8da51a5632092f98a7e7f84bca26f33d161a98b57f7fb0004ca"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9da022c14baeec36edfcc8daf0e281e2f55b950249a455776f0d1adeeada4734"},
-    {file = "lxml-6.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a55da151d0b0c6ab176b4e761670ac0e2667817a1e0dadd04a01d0561a219349"},
-    {file = "lxml-6.0.0.tar.gz", hash = "sha256:032e65120339d44cdc3efc326c9f660f5f7205f3a535c1fdbf898b29ea01fb72"},
+    {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
+    {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f952dacaa552f3bb8834908dddd500ba7d508e6ea6eb8c52eb2d28f48ca06a31"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:71695772df6acea9f3c0e59e44ba8ac50c4f125217e84aab21074a1a55e7e5c9"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:17f68764f35fd78d7c4cc4ef209a184c38b65440378013d24b8aecd327c3e0c8"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:058027e261afed589eddcfe530fcc6f3402d7fd7e89bfd0532df82ebc1563dba"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8ffaeec5dfea5881d4c9d8913a32d10cfe3923495386106e4a24d45300ef79c"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:f2e3b1a6bb38de0bc713edd4d612969dd250ca8b724be8d460001a387507021c"},
+    {file = "lxml-6.0.2-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d6690ec5ec1cce0385cb20896b16be35247ac8c2046e493d03232f1c2414d321"},
+    {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2a50c3c1d11cad0ebebbac357a97b26aa79d2bcaf46f256551152aa85d3a4d1"},
+    {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3efe1b21c7801ffa29a1112fab3b0f643628c30472d507f39544fd48e9549e34"},
+    {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:59c45e125140b2c4b33920d21d83681940ca29f0b83f8629ea1a2196dc8cfe6a"},
+    {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:452b899faa64f1805943ec1c0c9ebeaece01a1af83e130b69cdefeda180bb42c"},
+    {file = "lxml-6.0.2-cp310-cp310-win32.whl", hash = "sha256:1e786a464c191ca43b133906c6903a7e4d56bef376b75d97ccbb8ec5cf1f0a4b"},
+    {file = "lxml-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:dacf3c64ef3f7440e3167aa4b49aa9e0fb99e0aa4f9ff03795640bf94531bcb0"},
+    {file = "lxml-6.0.2-cp310-cp310-win_arm64.whl", hash = "sha256:45f93e6f75123f88d7f0cfd90f2d05f441b808562bf0bc01070a00f53f5028b5"},
+    {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:13e35cbc684aadf05d8711a5d1b5857c92e5e580efa9a0d2be197199c8def607"},
+    {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b1675e096e17c6fe9c0e8c81434f5736c0739ff9ac6123c87c2d452f48fc938"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8ac6e5811ae2870953390452e3476694196f98d447573234592d30488147404d"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5aa0fc67ae19d7a64c3fe725dc9a1bb11f80e01f78289d05c6f62545affec438"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de496365750cc472b4e7902a485d3f152ecf57bd3ba03ddd5578ed8ceb4c5964"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:200069a593c5e40b8f6fc0d84d86d970ba43138c3e68619ffa234bc9bb806a4d"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d2de809c2ee3b888b59f995625385f74629707c9355e0ff856445cdcae682b7"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:b2c3da8d93cf5db60e8858c17684c47d01fee6405e554fb55018dd85fc23b178"},
+    {file = "lxml-6.0.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:442de7530296ef5e188373a1ea5789a46ce90c4847e597856570439621d9c553"},
+    {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2593c77efde7bfea7f6389f1ab249b15ed4aa5bc5cb5131faa3b843c429fbedb"},
+    {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:3e3cb08855967a20f553ff32d147e14329b3ae70ced6edc2f282b94afbc74b2a"},
+    {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ed6c667fcbb8c19c6791bbf40b7268ef8ddf5a96940ba9404b9f9a304832f6c"},
+    {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b8f18914faec94132e5b91e69d76a5c1d7b0c73e2489ea8929c4aaa10b76bbf7"},
+    {file = "lxml-6.0.2-cp311-cp311-win32.whl", hash = "sha256:6605c604e6daa9e0d7f0a2137bdc47a2e93b59c60a65466353e37f8272f47c46"},
+    {file = "lxml-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e5867f2651016a3afd8dd2c8238baa66f1e2802f44bc17e236f547ace6647078"},
+    {file = "lxml-6.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:4197fb2534ee05fd3e7afaab5d8bfd6c2e186f65ea7f9cd6a82809c887bd1285"},
+    {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456"},
+    {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0"},
+    {file = "lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092"},
+    {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f"},
+    {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8"},
+    {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f"},
+    {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6"},
+    {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322"},
+    {file = "lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849"},
+    {file = "lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f"},
+    {file = "lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6"},
+    {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9b33d21594afab46f37ae58dfadd06636f154923c4e8a4d754b0127554eb2e77"},
+    {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c8963287d7a4c5c9a432ff487c52e9c5618667179c18a204bdedb27310f022f"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1941354d92699fb5ffe6ed7b32f9649e43c2feb4b97205f75866f7d21aa91452"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb2f6ca0ae2d983ded09357b84af659c954722bbf04dea98030064996d156048"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a12d704f180a902d7fa778c6d71f36ceb7b0d317f34cdc76a5d05aa1dd1df"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:6ec0e3f745021bfed19c456647f0298d60a24c9ff86d9d051f52b509663feeb1"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:846ae9a12d54e368933b9759052d6206a9e8b250291109c48e350c1f1f49d916"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef9266d2aa545d7374938fb5c484531ef5a2ec7f2d573e62f8ce722c735685fd"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:4077b7c79f31755df33b795dc12119cb557a0106bfdab0d2c2d97bd3cf3dffa6"},
+    {file = "lxml-6.0.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a7c5d5e5f1081955358533be077166ee97ed2571d6a66bdba6ec2f609a715d1a"},
+    {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8f8d0cbd0674ee89863a523e6994ac25fd5be9c8486acfc3e5ccea679bad2679"},
+    {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2cbcbf6d6e924c28f04a43f3b6f6e272312a090f269eff68a2982e13e5d57659"},
+    {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfb874cfa53340009af6bdd7e54ebc0d21012a60a4e65d927c2e477112e63484"},
+    {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fb8dae0b6b8b7f9e96c26fdd8121522ce5de9bb5538010870bd538683d30e9a2"},
+    {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:358d9adae670b63e95bc59747c72f4dc97c9ec58881d4627fe0120da0f90d314"},
+    {file = "lxml-6.0.2-cp313-cp313-win32.whl", hash = "sha256:e8cd2415f372e7e5a789d743d133ae474290a90b9023197fd78f32e2dc6873e2"},
+    {file = "lxml-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:b30d46379644fbfc3ab81f8f82ae4de55179414651f110a1514f0b1f8f6cb2d7"},
+    {file = "lxml-6.0.2-cp313-cp313-win_arm64.whl", hash = "sha256:13dcecc9946dca97b11b7c40d29fba63b55ab4170d3c0cf8c0c164343b9bfdcf"},
+    {file = "lxml-6.0.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:b0c732aa23de8f8aec23f4b580d1e52905ef468afb4abeafd3fec77042abb6fe"},
+    {file = "lxml-6.0.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4468e3b83e10e0317a89a33d28f7aeba1caa4d1a6fd457d115dd4ffe90c5931d"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:abd44571493973bad4598a3be7e1d807ed45aa2adaf7ab92ab7c62609569b17d"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:370cd78d5855cfbffd57c422851f7d3864e6ae72d0da615fca4dad8c45d375a5"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:901e3b4219fa04ef766885fb40fa516a71662a4c61b80c94d25336b4934b71c0"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:a4bf42d2e4cf52c28cc1812d62426b9503cdb0c87a6de81442626aa7d69707ba"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2c7fdaa4d7c3d886a42534adec7cfac73860b89b4e5298752f60aa5984641a0"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98a5e1660dc7de2200b00d53fa00bcd3c35a3608c305d45a7bbcaf29fa16e83d"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:dc051506c30b609238d79eda75ee9cab3e520570ec8219844a72a46020901e37"},
+    {file = "lxml-6.0.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8799481bbdd212470d17513a54d568f44416db01250f49449647b5ab5b5dccb9"},
+    {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9261bb77c2dab42f3ecd9103951aeca2c40277701eb7e912c545c1b16e0e4917"},
+    {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:65ac4a01aba353cfa6d5725b95d7aed6356ddc0a3cd734de00124d285b04b64f"},
+    {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b22a07cbb82fea98f8a2fd814f3d1811ff9ed76d0fc6abc84eb21527596e7cc8"},
+    {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d759cdd7f3e055d6bc8d9bec3ad905227b2e4c785dc16c372eb5b5e83123f48a"},
+    {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:945da35a48d193d27c188037a05fec5492937f66fb1958c24fc761fb9d40d43c"},
+    {file = "lxml-6.0.2-cp314-cp314-win32.whl", hash = "sha256:be3aaa60da67e6153eb15715cc2e19091af5dc75faef8b8a585aea372507384b"},
+    {file = "lxml-6.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa25afbadead523f7001caf0c2382afd272c315a033a7b06336da2637d92d6ed"},
+    {file = "lxml-6.0.2-cp314-cp314-win_arm64.whl", hash = "sha256:063eccf89df5b24e361b123e257e437f9e9878f425ee9aae3144c77faf6da6d8"},
+    {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:6162a86d86893d63084faaf4ff937b3daea233e3682fb4474db07395794fa80d"},
+    {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:414aaa94e974e23a3e92e7ca5b97d10c0cf37b6481f50911032c69eeb3991bba"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48461bd21625458dd01e14e2c38dd0aea69addc3c4f960c30d9f59d7f93be601"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:25fcc59afc57d527cfc78a58f40ab4c9b8fd096a9a3f964d2781ffb6eb33f4ed"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5179c60288204e6ddde3f774a93350177e08876eaf3ab78aa3a3649d43eb7d37"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:967aab75434de148ec80597b75062d8123cadf2943fb4281f385141e18b21338"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d100fcc8930d697c6561156c6810ab4a508fb264c8b6779e6e61e2ed5e7558f9"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ca59e7e13e5981175b8b3e4ab84d7da57993eeff53c07764dcebda0d0e64ecd"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:957448ac63a42e2e49531b9d6c0fa449a1970dbc32467aaad46f11545be9af1d"},
+    {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b7fc49c37f1786284b12af63152fe1d0990722497e2d5817acfe7a877522f9a9"},
+    {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e19e0643cc936a22e837f79d01a550678da8377d7d801a14487c10c34ee49c7e"},
+    {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:1db01e5cf14345628e0cbe71067204db658e2fb8e51e7f33631f5f4735fefd8d"},
+    {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:875c6b5ab39ad5291588aed6925fac99d0097af0dd62f33c7b43736043d4a2ec"},
+    {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:cdcbed9ad19da81c480dfd6dd161886db6096083c9938ead313d94b30aadf272"},
+    {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:80dadc234ebc532e09be1975ff538d154a7fa61ea5031c03d25178855544728f"},
+    {file = "lxml-6.0.2-cp314-cp314t-win32.whl", hash = "sha256:da08e7bb297b04e893d91087df19638dc7a6bb858a954b0cc2b9f5053c922312"},
+    {file = "lxml-6.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:252a22982dca42f6155125ac76d3432e548a7625d56f5a273ee78a5057216eca"},
+    {file = "lxml-6.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:bb4c1847b303835d89d785a18801a883436cdfd5dc3d62947f9c49e24f0f5a2c"},
+    {file = "lxml-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a656ca105115f6b766bba324f23a67914d9c728dafec57638e2b92a9dcd76c62"},
```
|
||||
{file = "lxml-6.0.2-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c54d83a2188a10ebdba573f16bd97135d06c9ef60c3dc495315c7a28c80a263f"},
|
||||
{file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:1ea99340b3c729beea786f78c38f60f4795622f36e305d9c9be402201efdc3b7"},
|
||||
{file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:af85529ae8d2a453feee4c780d9406a5e3b17cee0dd75c18bd31adcd584debc3"},
|
||||
{file = "lxml-6.0.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fe659f6b5d10fb5a17f00a50eb903eb277a71ee35df4615db573c069bcf967ac"},
|
||||
{file = "lxml-6.0.2-cp38-cp38-win32.whl", hash = "sha256:5921d924aa5468c939d95c9814fa9f9b5935a6ff4e679e26aaf2951f74043512"},
|
||||
{file = "lxml-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:0aa7070978f893954008ab73bb9e3c24a7c56c054e00566a21b553dc18105fca"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2c8458c2cdd29589a8367c09c8f030f1d202be673f0ca224ec18590b3b9fb694"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fee0851639d06276e6b387f1c190eb9d7f06f7f53514e966b26bae46481ec90"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b2142a376b40b6736dfc214fd2902409e9e3857eff554fed2d3c60f097e62a62"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6b5b39cc7e2998f968f05309e666103b53e2edd01df8dc51b90d734c0825444"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4aec24d6b72ee457ec665344a29acb2d35937d5192faebe429ea02633151aad"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:b42f4d86b451c2f9d06ffb4f8bbc776e04df3ba070b9fe2657804b1b40277c48"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cdaefac66e8b8f30e37a9b4768a391e1f8a16a7526d5bc77a7928408ef68e93"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:b738f7e648735714bbb82bdfd030203360cfeab7f6e8a34772b3c8c8b820568c"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:daf42de090d59db025af61ce6bdb2521f0f102ea0e6ea310f13c17610a97da4c"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:66328dabea70b5ba7e53d94aa774b733cf66686535f3bc9250a7aab53a91caaf"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:e237b807d68a61fc3b1e845407e27e5eb8ef69bc93fe8505337c1acb4ee300b6"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:ac02dc29fd397608f8eb15ac1610ae2f2f0154b03f631e6d724d9e2ad4ee2c84"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:817ef43a0c0b4a77bd166dc9a09a555394105ff3374777ad41f453526e37f9cb"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-win32.whl", hash = "sha256:bc532422ff26b304cfb62b328826bd995c96154ffd2bac4544f37dbb95ecaa8f"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:995e783eb0374c120f528f807443ad5a83a656a8624c467ea73781fc5f8a8304"},
|
||||
{file = "lxml-6.0.2-cp39-cp39-win_arm64.whl", hash = "sha256:08b9d5e803c2e4725ae9e8559ee880e5328ed61aa0935244e0515d7d9dbec0aa"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e748d4cf8fef2526bb2a589a417eba0c8674e29ffcb570ce2ceca44f1e567bf6"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4ddb1049fa0579d0cbd00503ad8c58b9ab34d1254c77bc6a5576d96ec7853dba"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cb233f9c95f83707dae461b12b720c1af9c28c2d19208e1be03387222151daf5"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc456d04db0515ce3320d714a1eac7a97774ff0849e7718b492d957da4631dd4"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2613e67de13d619fd283d58bda40bff0ee07739f624ffee8b13b631abf33083d"},
|
||||
{file = "lxml-6.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:24a8e756c982c001ca8d59e87c80c4d9dcd4d9b44a4cbeb8d9be4482c514d41d"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1c06035eafa8404b5cf475bb37a9f6088b0aca288d4ccc9d69389750d5543700"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c7d13103045de1bdd6fe5d61802565f1a3537d70cd3abf596aa0af62761921ee"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a3c150a95fbe5ac91de323aa756219ef9cf7fde5a3f00e2281e30f33fa5fa4f"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60fa43be34f78bebb27812ed90f1925ec99560b0fa1decdb7d12b84d857d31e9"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:21c73b476d3cfe836be731225ec3421fa2f048d84f6df6a8e70433dff1376d5a"},
|
||||
{file = "lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e"},
|
||||
{file = "lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
@@ -2562,14 +2620,14 @@ type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.deve
|
||||
|
||||
[[package]]
|
||||
name = "setuptools-rust"
|
||||
version = "1.11.1"
|
||||
version = "1.12.0"
|
||||
description = "Setuptools Rust extension plugin"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "setuptools_rust-1.11.1-py3-none-any.whl", hash = "sha256:5eaaddaed268dc24a527ffa659ce56b22d3cf17b781247b779efd611031fe8ea"},
|
||||
{file = "setuptools_rust-1.11.1.tar.gz", hash = "sha256:7dabc4392252ced314b8050d63276e05fdc5d32398fc7d3cce1f6a6ac35b76c0"},
|
||||
{file = "setuptools_rust-1.12.0-py3-none-any.whl", hash = "sha256:7e7db90547f224a835b45f5ad90c983340828a345554a9a660bdb2de8605dcdd"},
|
||||
{file = "setuptools_rust-1.12.0.tar.gz", hash = "sha256:d94a93f0c97751c17014565f07bdc324bee45d396cd1bba83d8e7af92b945f0c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2794,20 +2852,20 @@ docs = ["sphinx", "sphinx-rtd-theme"]
|
||||
|
||||
[[package]]
|
||||
name = "twine"
|
||||
version = "6.1.0"
|
||||
version = "6.2.0"
|
||||
description = "Collection of utilities for publishing packages on PyPI"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "twine-6.1.0-py3-none-any.whl", hash = "sha256:a47f973caf122930bf0fbbf17f80b83bc1602c9ce393c7845f289a3001dc5384"},
|
||||
{file = "twine-6.1.0.tar.gz", hash = "sha256:be324f6272eff91d07ee93f251edf232fc647935dd585ac003539b42404a8dbd"},
|
||||
{file = "twine-6.2.0-py3-none-any.whl", hash = "sha256:418ebf08ccda9a8caaebe414433b0ba5e25eb5e4a927667122fbe8f829f985d8"},
|
||||
{file = "twine-6.2.0.tar.gz", hash = "sha256:e5ed0d2fd70c9959770dce51c8f39c8945c574e18173a7b81802dab51b4b75cf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
id = "*"
|
||||
importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
|
||||
keyring = {version = ">=15.1", markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\""}
|
||||
keyring = {version = ">=21.2.0", markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\""}
|
||||
packaging = ">=24.0"
|
||||
readme-renderer = ">=35.0"
|
||||
requests = ">=2.20"
|
||||
@@ -2817,7 +2875,7 @@ rich = ">=12.0.0"
|
||||
urllib3 = ">=1.26.0"
|
||||
|
||||
[package.extras]
|
||||
keyring = ["keyring (>=15.1)"]
|
||||
keyring = ["keyring (>=21.2.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "twisted"
|
||||
|
||||
@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.139.0rc3"
|
||||
version = "1.139.0"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "AGPL-3.0-or-later OR LicenseRef-Element-Commercial"
|
||||
|
||||
@@ -2259,9 +2259,8 @@ properties:
|
||||
Setting this to a high value allows users to report content quickly, possibly in
|
||||
duplicate. This can result in higher database usage.
|
||||
default:
|
||||
per_user:
|
||||
per_second: 1.0
|
||||
burst_count: 5.0
|
||||
per_second: 1.0
|
||||
burst_count: 5.0
|
||||
examples:
|
||||
- per_second: 2.0
|
||||
burst_count: 20.0
|
||||
@@ -2270,9 +2269,8 @@ properties:
|
||||
description: >-
|
||||
Sets rate limits for how often users are able to create rooms.
|
||||
default:
|
||||
per_user:
|
||||
per_second: 0.016
|
||||
burst_count: 10.0
|
||||
per_second: 0.016
|
||||
burst_count: 10.0
|
||||
examples:
|
||||
- per_second: 1.0
|
||||
burst_count: 5.0
|
||||
|
||||
@@ -68,18 +68,42 @@ PROMETHEUS_METRIC_MISSING_FROM_LIST_TO_CHECK = ErrorCode(
category="per-homeserver-tenant-metrics",
)

PREFER_SYNAPSE_CLOCK_CALL_LATER = ErrorCode(
"call-later-not-tracked",
"Prefer using `synapse.util.Clock.call_later` instead of `reactor.callLater`",
category="synapse-reactor-clock",
)

PREFER_SYNAPSE_CLOCK_LOOPING_CALL = ErrorCode(
"prefer-synapse-clock-looping-call",
"Prefer using `synapse.util.Clock.looping_call` instead of `task.LoopingCall`",
category="synapse-reactor-clock",
)

PREFER_SYNAPSE_CLOCK_CALL_WHEN_RUNNING = ErrorCode(
"prefer-synapse-clock-call-when-running",
"`synapse.util.Clock.call_when_running` should be used instead of `reactor.callWhenRunning`",
"Prefer using `synapse.util.Clock.call_when_running` instead of `reactor.callWhenRunning`",
category="synapse-reactor-clock",
)

PREFER_SYNAPSE_CLOCK_ADD_SYSTEM_EVENT_TRIGGER = ErrorCode(
"prefer-synapse-clock-add-system-event-trigger",
"`synapse.util.Clock.add_system_event_trigger` should be used instead of `reactor.addSystemEventTrigger`",
"Prefer using `synapse.util.Clock.add_system_event_trigger` instead of `reactor.addSystemEventTrigger`",
category="synapse-reactor-clock",
)

MULTIPLE_INTERNAL_CLOCKS_CREATED = ErrorCode(
"multiple-internal-clocks",
"Only one instance of `clock.Clock` should be created",
category="synapse-reactor-clock",
)

UNTRACKED_BACKGROUND_PROCESS = ErrorCode(
"untracked-background-process",
"Prefer using `HomeServer.run_as_background_process` method over the bare `run_as_background_process`",
category="synapse-tracked-calls",
)


class Sentinel(enum.Enum):
# defining a sentinel in this way allows mypy to correctly handle the
@@ -222,6 +246,18 @@ class SynapsePlugin(Plugin):
# callback, let's just pass it in while we have it.
return lambda ctx: check_prometheus_metric_instantiation(ctx, fullname)

if fullname == "twisted.internet.task.LoopingCall":
return check_looping_call

if fullname == "synapse.util.clock.Clock":
return check_clock_creation

if (
fullname
== "synapse.metrics.background_process_metrics.run_as_background_process"
):
return check_background_process

return None

def get_method_signature_hook(
@@ -241,6 +277,13 @@ class SynapsePlugin(Plugin):
):
return check_is_cacheable_wrapper

if fullname in (
"twisted.internet.interfaces.IReactorTime.callLater",
"synapse.types.ISynapseThreadlessReactor.callLater",
"synapse.types.ISynapseReactor.callLater",
):
return check_call_later

if fullname in (
"twisted.internet.interfaces.IReactorCore.callWhenRunning",
"synapse.types.ISynapseThreadlessReactor.callWhenRunning",
@@ -258,6 +301,78 @@ class SynapsePlugin(Plugin):
return None


def check_clock_creation(ctx: FunctionSigContext) -> CallableType:
"""
Ensure that the only `clock.Clock` instance is the one used by the `HomeServer`.
This is so that the `HomeServer` can cancel any tracked delayed or looping calls
during server shutdown.

Args:
ctx: The `FunctionSigContext` from mypy.
"""
signature: CallableType = ctx.default_signature
ctx.api.fail(
"Expected the only `clock.Clock` instance to be the one used by the `HomeServer`. "
"This is so that the `HomeServer` can cancel any tracked delayed or looping calls "
"during server shutdown",
ctx.context,
code=MULTIPLE_INTERNAL_CLOCKS_CREATED,
)

return signature


def check_call_later(ctx: MethodSigContext) -> CallableType:
"""
Ensure that `reactor.callLater` callsites aren't used.

`synapse.util.Clock.call_later` should always be used instead of `reactor.callLater`.
This is because `synapse.util.Clock` tracks delayed calls in order to cancel any
outstanding calls during server shutdown. Delayed calls which are either short lived
(<~60s) or frequently fired, and which can be tracked via other means, could be
candidates for using `synapse.util.Clock.call_later` with
`call_later_cancel_on_shutdown` set to `False`. There shouldn't be a need to use
`reactor.callLater` outside of tests or the `Clock` class itself. If a need arises,
you can use a type ignore comment to disable the check, e.g.
`# type: ignore[call-later-not-tracked]`.

Args:
ctx: The `MethodSigContext` from mypy.
"""
signature: CallableType = ctx.default_signature
ctx.api.fail(
"Expected all `reactor.callLater` calls to use `synapse.util.Clock.call_later` "
"instead. This is so that long lived calls can be tracked for cancellation during "
"server shutdown",
ctx.context,
code=PREFER_SYNAPSE_CLOCK_CALL_LATER,
)

return signature


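A minimal sketch of the call-site pattern this check steers code toward (the handler class here is hypothetical; the `Clock` API is as described in the docstring above):

    from synapse.util.clock import Clock

    class ExampleHandler:
        def __init__(self, hs) -> None:
            # The HomeServer's Clock tracks delayed calls so they can be
            # cancelled during server shutdown.
            self._clock: Clock = hs.get_clock()

        def schedule_cleanup(self) -> None:
            # Preferred: a tracked delayed call.
            self._clock.call_later(30, self._do_cleanup)
            # Discouraged: `reactor.callLater(30, self._do_cleanup)` would be
            # flagged by the plugin as `call-later-not-tracked`.

        def _do_cleanup(self) -> None:
            ...
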
def check_looping_call(ctx: FunctionSigContext) -> CallableType:
"""
Ensure that the `task.LoopingCall` callsites aren't used.

`synapse.util.Clock.looping_call` should always be used instead of `task.LoopingCall`.
`synapse.util.Clock` tracks looping calls in order to cancel any outstanding calls
during server shutdown.

Args:
ctx: The `FunctionSigContext` from mypy.
"""
signature: CallableType = ctx.default_signature
ctx.api.fail(
"Expected all `task.LoopingCall` instances to use `synapse.util.Clock.looping_call` "
"instead. This is so that long lived calls can be tracked for cancellation during "
"server shutdown",
ctx.context,
code=PREFER_SYNAPSE_CLOCK_LOOPING_CALL,
)

return signature


def check_call_when_running(ctx: MethodSigContext) -> CallableType:
"""
Ensure that the `reactor.callWhenRunning` callsites aren't used.
@@ -312,6 +427,27 @@ def check_add_system_event_trigger(ctx: MethodSigContext) -> CallableType:
return signature


def check_background_process(ctx: FunctionSigContext) -> CallableType:
"""
Ensure that calls to `run_as_background_process` use the `HomeServer` method.
This is so that the `HomeServer` can cancel any running background processes during
server shutdown.

Args:
ctx: The `FunctionSigContext` from mypy.
"""
signature: CallableType = ctx.default_signature
ctx.api.fail(
"Prefer using `HomeServer.run_as_background_process` method over the bare "
"`run_as_background_process`. This is so that the `HomeServer` can cancel "
"any background processes during server shutdown",
ctx.context,
code=UNTRACKED_BACKGROUND_PROCESS,
)

return signature


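The same conversion appears throughout the rest of this changeset; a sketch of the before/after shape (hypothetical handler; `HomeServer.run_as_background_process` is assumed to take the description and callback without the explicit `server_name` argument, matching the call sites updated below):

    class ExampleHandler:
        def __init__(self, hs) -> None:
            self.hs = hs

        def start_work(self) -> None:
            # Preferred: tracked by the HomeServer and cancellable on shutdown.
            self.hs.run_as_background_process("example_work", self._work)
            # Discouraged: the bare `run_as_background_process(...)` call is
            # flagged as `untracked-background-process`.

        async def _work(self) -> None:
            ...
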
def analyze_prometheus_metric_classes(ctx: ClassDefContext) -> None:
"""
Cross-check the list of Prometheus metric classes against the

@@ -157,7 +157,12 @@ def get_registered_paths_for_default(
# TODO We only do this to avoid an error, but don't need the database etc
hs.setup()
registered_paths = get_registered_paths_for_hs(hs)
hs.cleanup()
# NOTE: a more robust implementation would properly shut down/clean up each server
# to avoid resource buildup.
# However, the call to `shutdown` is `async`, so it would require additional complexity here.
# We are intentionally skipping this cleanup because this is a short-lived, one-off
# utility script where the simpler approach is sufficient and we shouldn't run into
# any resource buildup issues.

return registered_paths


@@ -28,7 +28,6 @@ import yaml
from twisted.internet import defer, reactor as reactor_

from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.types import ISynapseReactor
@@ -53,7 +52,6 @@ class MockHomeserver(HomeServer):


def run_background_updates(hs: HomeServer) -> None:
server_name = hs.hostname
main = hs.get_datastores().main
state = hs.get_datastores().state

@@ -67,9 +65,8 @@ def run_background_updates(hs: HomeServer) -> None:
def run() -> None:
# Apply all background updates on the database.
defer.ensureDeferred(
run_as_background_process(
hs.run_as_background_process(
"background_updates",
server_name,
run_background_updates,
)
)

@@ -28,6 +28,7 @@ import sys
import traceback
import warnings
from textwrap import indent
from threading import Thread
from typing import (
TYPE_CHECKING,
Any,
@@ -40,6 +41,7 @@ from typing import (
Tuple,
cast,
)
from wsgiref.simple_server import WSGIServer

from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import ParamSpec
@@ -97,22 +99,47 @@ reactor = cast(ISynapseReactor, _reactor)

logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
_sighup_callbacks: List[
Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
] = []
_instance_id_to_sighup_callbacks_map: Dict[
str, List[Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]]
] = {}
"""
Map from homeserver instance_id to a list of callbacks.

We use `instance_id` instead of `server_name` because it's possible to have multiple
workers running in the same process with the same `server_name`.
"""
P = ParamSpec("P")


def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
def register_sighup(
homeserver_instance_id: str,
func: Callable[P, None],
*args: P.args,
**kwargs: P.kwargs,
) -> None:
"""
Register a function to be called when a SIGHUP occurs.

Args:
homeserver_instance_id: The unique ID for this Synapse process instance
(`hs.get_instance_id()`) that this hook is associated with.
func: Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
_sighup_callbacks.append((func, args, kwargs))

_instance_id_to_sighup_callbacks_map.setdefault(homeserver_instance_id, []).append(
(func, args, kwargs)
)


def unregister_sighups(instance_id: str) -> None:
"""
Unregister all sighup functions associated with this Synapse instance.

Args:
instance_id: Unique ID for this Synapse process instance.
"""
_instance_id_to_sighup_callbacks_map.pop(instance_id, [])


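A short sketch of the resulting lifecycle (the reload callback is hypothetical; the registration shape matches the `setup_logging` and `start` changes later in this diff):

    def _reload_certs(hs) -> None:
        # e.g. re-read TLS material when the process receives SIGHUP
        ...

    # Hooks are registered against this homeserver's unique instance ID...
    register_sighup(hs.get_instance_id(), _reload_certs, hs)

    # ...so a single worker can be torn down in a multi-worker process
    # without leaving its SIGHUP callbacks behind.
    unregister_sighups(hs.get_instance_id())
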
def start_worker_reactor(
@@ -281,7 +308,9 @@ def register_start(
clock.call_when_running(lambda: defer.ensureDeferred(wrapper()))


def listen_metrics(bind_addresses: StrCollection, port: int) -> None:
def listen_metrics(
bind_addresses: StrCollection, port: int
) -> List[Tuple[WSGIServer, Thread]]:
"""
Start Prometheus metrics server.

@@ -294,14 +323,22 @@ def listen_metrics(bind_addresses: StrCollection, port: int) -> None:
bytecode at a time), this still works because the metrics thread can preempt the
Twisted reactor thread between bytecode boundaries and the metrics thread gets
scheduled with roughly equal priority to the Twisted reactor thread.

Returns:
List of WSGIServer with the thread they are running on.
"""
from prometheus_client import start_http_server as start_http_server_prometheus

from synapse.metrics import RegistryProxy

servers: List[Tuple[WSGIServer, Thread]] = []
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
server, thread = start_http_server_prometheus(
port, addr=host, registry=RegistryProxy
)
servers.append((server, thread))
return servers


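Sketched usage of the new return value (the `_metrics_listeners` attribute matches the worker/homeserver changes below; `WSGIServer.shutdown()` is the standard `socketserver` API):

    # At listener setup time, keep the handles:
    self._metrics_listeners.extend(
        listen_metrics(listener.bind_addresses, listener.port)
    )

    # A shutdown path can then stop each metrics server and its thread:
    for server, thread in self._metrics_listeners:
        server.shutdown()
        thread.join()
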
def listen_manhole(
@@ -309,7 +346,7 @@ def listen_manhole(
port: int,
manhole_settings: ManholeConfig,
manhole_globals: dict,
) -> None:
) -> List[Port]:
# twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
# warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so
# suppress the warning for now.
@@ -321,7 +358,7 @@ def listen_manhole(

from synapse.util.manhole import manhole

listen_tcp(
return listen_tcp(
bind_addresses,
port,
manhole(settings=manhole_settings, globals=manhole_globals),
@@ -498,7 +535,7 @@ def refresh_certificate(hs: "HomeServer") -> None:
logger.info("Context factories updated.")


async def start(hs: "HomeServer") -> None:
async def start(hs: "HomeServer", freeze: bool = True) -> None:
"""
Start a Synapse server or worker.

@@ -509,6 +546,11 @@ async def start(hs: "HomeServer") -> None:

Args:
hs: homeserver instance
freeze: whether to freeze the homeserver base objects in the garbage collector.
May improve garbage collection performance by marking objects with an effectively
static lifetime as frozen so they don't need to be considered for cleanup.
If you ever want to `shutdown` the homeserver, this needs to be
False, otherwise the homeserver cannot be garbage collected after `shutdown`.
"""
server_name = hs.hostname
reactor = hs.get_reactor()
@@ -541,12 +583,17 @@ async def start(hs: "HomeServer") -> None:
# we're not using systemd.
sdnotify(b"RELOADING=1")

for i, args, kwargs in _sighup_callbacks:
i(*args, **kwargs)
for sighup_callbacks in _instance_id_to_sighup_callbacks_map.values():
for func, args, kwargs in sighup_callbacks:
func(*args, **kwargs)

sdnotify(b"READY=1")

return run_as_background_process(
# It's okay to ignore the linter error here and call
# `run_as_background_process` directly because `_handle_sighup` operates
# outside of the scope of a specific `HomeServer` instance and holds no
# references to it which would prevent a clean shutdown.
return run_as_background_process(  # type: ignore[untracked-background-process]
"sighup",
server_name,
_handle_sighup,
@@ -564,8 +611,8 @@ async def start(hs: "HomeServer") -> None:

signal.signal(signal.SIGHUP, run_sighup)

register_sighup(refresh_certificate, hs)
register_sighup(reload_cache_config, hs.config)
register_sighup(hs.get_instance_id(), refresh_certificate, hs)
register_sighup(hs.get_instance_id(), reload_cache_config, hs.config)

# Apply the cache config.
hs.config.caches.resize_all_caches()
@@ -603,7 +650,11 @@ async def start(hs: "HomeServer") -> None:
logger.info("Shutting down...")

# Log when we start the shut down process.
hs.get_clock().add_system_event_trigger("before", "shutdown", log_shutdown)
hs.register_sync_shutdown_handler(
phase="before",
eventType="shutdown",
shutdown_func=log_shutdown,
)

setup_sentry(hs)
setup_sdnotify(hs)
@@ -632,18 +683,24 @@ async def start(hs: "HomeServer") -> None:
# `REQUIRED_ON_BACKGROUND_TASK_STARTUP`
start_phone_stats_home(hs)

# We now freeze all allocated objects in the hopes that (almost)
# everything currently allocated are things that will be used for the
# rest of time. Doing so means less work each GC (hopefully).
#
# PyPy does not (yet?) implement gc.freeze()
if hasattr(gc, "freeze"):
gc.collect()
gc.freeze()
if freeze:
# We now freeze all allocated objects in the hopes that (almost)
# everything currently allocated are things that will be used for the
# rest of time. Doing so means less work each GC (hopefully).
#
# Note that freezing the homeserver object means that it won't be able to be
# garbage collected in the case of attempting an in-memory `shutdown`. This only
# needs to be considered if such a case is desirable. Exiting the entire Python
# process will function as expected either way.
#
# PyPy does not (yet?) implement gc.freeze()
if hasattr(gc, "freeze"):
gc.collect()
gc.freeze()

# Speed up shutdowns by freezing all allocated objects. This moves everything
# into the permanent generation and excludes them from the final GC.
atexit.register(gc.freeze)
# Speed up process exit by freezing all allocated objects. This moves everything
# into the permanent generation and excludes them from the final GC.
atexit.register(gc.freeze)


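For reference, `gc.freeze()` (CPython 3.7+) moves all currently tracked objects into a permanent generation that the collector never scans again, which is exactly why a frozen homeserver cannot be reclaimed after an in-memory `shutdown`; a standalone illustration:

    import gc

    gc.collect()   # collect garbage first so we don't freeze it permanently
    gc.freeze()    # everything still alive is now exempt from future GC passes
    print(gc.get_freeze_count(), "objects in the permanent generation")
    gc.unfreeze()  # the reverse operation; not used by the code above
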
def reload_cache_config(config: HomeServerConfig) -> None:

@@ -278,11 +278,13 @@ class GenericWorkerServer(HomeServer):
self._listen_http(listener)
elif listener.type == "manhole":
if isinstance(listener, TCPListenerConfig):
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
self._listening_services.extend(
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
)
else:
raise ConfigError(
@@ -296,9 +298,11 @@ class GenericWorkerServer(HomeServer):
)
else:
if isinstance(listener, TCPListenerConfig):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
self._metrics_listeners.extend(
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
)
else:
raise ConfigError(

@@ -22,7 +22,7 @@
import logging
import os
import sys
from typing import Dict, Iterable, List
from typing import Dict, Iterable, List, Optional

from twisted.internet.tcp import Port
from twisted.web.resource import EncodingResourceWrapper, Resource
@@ -70,6 +70,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.types import ISynapseReactor
from synapse.util.check_dependencies import VERSION, check_requirements
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.module_loader import load_module
@@ -277,11 +278,13 @@ class SynapseHomeServer(HomeServer):
)
elif listener.type == "manhole":
if isinstance(listener, TCPListenerConfig):
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
self._listening_services.extend(
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
)
else:
raise ConfigError(
@@ -294,9 +297,11 @@ class SynapseHomeServer(HomeServer):
)
else:
if isinstance(listener, TCPListenerConfig):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
self._metrics_listeners.extend(
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
)
else:
raise ConfigError(
@@ -340,12 +345,23 @@ def load_or_generate_config(argv_options: List[str]) -> HomeServerConfig:
return config


def setup(config: HomeServerConfig) -> SynapseHomeServer:
def setup(
config: HomeServerConfig,
reactor: Optional[ISynapseReactor] = None,
freeze: bool = True,
) -> SynapseHomeServer:
"""
Create and setup a Synapse homeserver instance given a configuration.

Args:
config: The configuration for the homeserver.
reactor: Optionally provide a reactor to use. Can be useful in different
scenarios where you want control over the reactor, such as tests.
freeze: whether to freeze the homeserver base objects in the garbage collector.
May improve garbage collection performance by marking objects with an effectively
static lifetime as frozen so they don't need to be considered for cleanup.
If you ever want to `shutdown` the homeserver, this needs to be
False, otherwise the homeserver cannot be garbage collected after `shutdown`.

Returns:
A homeserver instance.
@@ -384,6 +400,7 @@ def setup(config: HomeServerConfig) -> SynapseHomeServer:
config.server.server_name,
config=config,
version_string=f"Synapse/{VERSION}",
reactor=reactor,
)

setup_logging(hs, config, use_worker_options=False)
@@ -405,7 +422,7 @@ def setup(config: HomeServerConfig) -> SynapseHomeServer:
# Loading the provider metadata also ensures the provider config is valid.
await oidc.load_metadata()

await _base.start(hs)
await _base.start(hs, freeze)

hs.get_datastores().main.db_pool.updates.start_doing_background_updates()


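A sketch of what the widened `setup()` signature enables, e.g. in tests that drive their own reactor and want the server to remain collectable afterwards (`my_test_reactor` is a placeholder; `hs.shutdown()` is the async shutdown referenced in the docstrings above):

    config = load_or_generate_config(["-c", "homeserver.yaml"])

    # Inject a reactor and skip gc.freeze so the instance can be garbage
    # collected once it is shut down.
    hs = setup(config, reactor=my_test_reactor, freeze=False)

    # ... exercise the server, then from an async context:
    #     await hs.shutdown()
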
@@ -29,19 +29,18 @@ from prometheus_client import Gauge
from twisted.internet import defer

from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import (
run_as_background_process,
)
from synapse.types import JsonDict
from synapse.util.constants import ONE_HOUR_SECONDS, ONE_MINUTE_SECONDS
from synapse.util.constants import (
MILLISECONDS_PER_SECOND,
ONE_HOUR_SECONDS,
ONE_MINUTE_SECONDS,
)

if TYPE_CHECKING:
from synapse.server import HomeServer

logger = logging.getLogger("synapse.app.homeserver")

MILLISECONDS_PER_SECOND = 1000

INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
"""
We wait 5 minutes to send the first set of stats as the server can be quite busy the
@@ -85,8 +84,6 @@ def phone_stats_home(
stats: JsonDict,
stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process,
) -> "defer.Deferred[None]":
server_name = hs.hostname

async def _phone_stats_home(
hs: "HomeServer",
stats: JsonDict,
@@ -200,8 +197,8 @@ def phone_stats_home(
except Exception as e:
logger.warning("Error reporting stats: %s", e)

return run_as_background_process(
"phone_stats_home", server_name, _phone_stats_home, hs, stats, stats_process
return hs.run_as_background_process(
"phone_stats_home", _phone_stats_home, hs, stats, stats_process
)


@@ -263,9 +260,8 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
float(hs.config.server.max_mau_value)
)

return run_as_background_process(
return hs.run_as_background_process(
"generate_monthly_active_users",
server_name,
_generate_monthly_active_users,
)

@@ -285,10 +281,16 @@ def start_phone_stats_home(hs: "HomeServer") -> None:

# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
clock.call_later(0, performance_stats_init)
clock.call_later(
0,
performance_stats_init,
)

# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, phone_stats_home, hs, stats
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS,
phone_stats_home,
hs,
stats,
)

@@ -23,15 +23,33 @@
import logging
import re
from enum import Enum
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern, Sequence
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
Optional,
Pattern,
Sequence,
cast,
)

import attr
from netaddr import IPSet

from twisted.internet import reactor

from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, UserID
from synapse.types import (
DeviceListUpdates,
ISynapseThreadlessReactor,
JsonDict,
JsonMapping,
UserID,
)
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.clock import Clock

if TYPE_CHECKING:
from synapse.appservice.api import ApplicationServiceApi
@@ -98,6 +116,15 @@ class ApplicationService:
self.sender = sender
# The application service user should be part of the server's domain.
self.server_name = sender.domain # nb must be called this for @cached

# Ideally we would require passing in the `HomeServer` `Clock` instance.
# However this is not currently possible as there are places which use
# `@cached` that aren't aware of the `HomeServer` instance.
# nb must be called this for @cached
self.clock = Clock(
cast(ISynapseThreadlessReactor, reactor), server_name=self.server_name
) # type: ignore[multiple-internal-clocks]

self.namespaces = self._check_namespaces(namespaces)
self.id = id
self.ip_range_whitelist = ip_range_whitelist

@@ -81,7 +81,6 @@ from synapse.appservice import (
from synapse.appservice.api import ApplicationServiceApi
from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main import DataStore
from synapse.types import DeviceListUpdates, JsonMapping
from synapse.util.clock import Clock
@@ -200,6 +199,7 @@ class _ServiceQueuer:
)
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.hs = hs
self._store = hs.get_datastores().main

def start_background_request(self, service: ApplicationService) -> None:
@@ -207,9 +207,7 @@ class _ServiceQueuer:
if service.id in self.requests_in_flight:
return

run_as_background_process(
"as-sender", self.server_name, self._send_request, service
)
self.hs.run_as_background_process("as-sender", self._send_request, service)

async def _send_request(self, service: ApplicationService) -> None:
# sanity-check: we shouldn't get here if this service already has a sender
@@ -361,6 +359,7 @@ class _TransactionController:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.hs = hs
self.store = hs.get_datastores().main
self.as_api = hs.get_application_service_api()

@@ -448,6 +447,7 @@ class _TransactionController:
recoverer = self.RECOVERER_CLASS(
self.server_name,
self.clock,
self.hs,
self.store,
self.as_api,
service,
@@ -494,6 +494,7 @@ class _Recoverer:
self,
server_name: str,
clock: Clock,
hs: "HomeServer",
store: DataStore,
as_api: ApplicationServiceApi,
service: ApplicationService,
@@ -501,6 +502,7 @@ class _Recoverer:
):
self.server_name = server_name
self.clock = clock
self.hs = hs
self.store = store
self.as_api = as_api
self.service = service
@@ -513,9 +515,8 @@ class _Recoverer:
logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
self.scheduled_recovery = self.clock.call_later(
delay,
run_as_background_process,
self.hs.run_as_background_process,
"as-recoverer",
self.server_name,
self.retry,
)

@@ -535,9 +536,8 @@ class _Recoverer:
if self.scheduled_recovery:
self.clock.cancel_call_later(self.scheduled_recovery)
# Run a retry, which will reschedule a recovery if it fails.
run_as_background_process(
self.hs.run_as_background_process(
"retry",
self.server_name,
self.retry,
)


@@ -345,7 +345,9 @@ def setup_logging(
# Add a SIGHUP handler to reload the logging configuration, if one is available.
from synapse.app import _base as appbase

appbase.register_sighup(_reload_logging_config, log_config_path)
appbase.register_sighup(
hs.get_instance_id(), _reload_logging_config, log_config_path
)

# Log immediately so we can grep backwards.
logger.warning("***** STARTING SERVER *****")

@@ -172,7 +172,7 @@ class Keyring:
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
] = BatchingQueue(
name="keyring_server",
server_name=self.server_name,
hs=hs,
clock=hs.get_clock(),
# The method called to fetch each key
process_batch_callback=self._inner_fetch_key_requests,
@@ -194,6 +194,14 @@ class Keyring:
valid_until_ts=2**63, # fake future timestamp
)

def shutdown(self) -> None:
"""
Prepares the Keyring for garbage collection by shutting down its queues.
"""
self._fetch_keys_queue.shutdown()
for key_fetcher in self._key_fetchers:
key_fetcher.shutdown()

async def verify_json_for_server(
self,
server_name: str,
@@ -479,11 +487,17 @@ class KeyFetcher(metaclass=abc.ABCMeta):
self.server_name = hs.hostname
self._queue = BatchingQueue(
name=self.__class__.__name__,
server_name=self.server_name,
hs=hs,
clock=hs.get_clock(),
process_batch_callback=self._fetch_keys,
)

def shutdown(self) -> None:
"""
Prepares the KeyFetcher for garbage collection by shutting down its queue.
"""
self._queue.shutdown()

async def get_keys(
self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
) -> Dict[str, FetchKeyResult]:

@@ -148,6 +148,7 @@ class FederationClient(FederationBase):
self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
cache_name="get_pdu_cache",
server_name=self.server_name,
hs=self.hs,
clock=self._clock,
max_len=1000,
expiry_ms=120 * 1000,
@@ -167,6 +168,7 @@ class FederationClient(FederationBase):
] = ExpiringCache(
cache_name="get_room_hierarchy_cache",
server_name=self.server_name,
hs=self.hs,
clock=self._clock,
max_len=1000,
expiry_ms=5 * 60 * 1000,

@@ -144,6 +144,9 @@ class FederationRemoteSendQueue(AbstractFederationSender):

self.clock.looping_call(self._clear_queue, 30 * 1000)

def shutdown(self) -> None:
"""Stops this federation sender instance from sending further transactions."""

def _next_pos(self) -> int:
pos = self.pos
self.pos += 1

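The shape being introduced here recurs across the changeset: a component that owns queues exposes a synchronous `shutdown()` that cascades to its children so the object graph stops doing work and can be garbage collected; distilled into a hypothetical component:

    class ExampleFetcher:
        def __init__(self, hs) -> None:
            self._queue = BatchingQueue(
                name=self.__class__.__name__,
                server_name=hs.hostname,
                hs=hs,
                clock=hs.get_clock(),
                process_batch_callback=self._fetch,
            )

        def shutdown(self) -> None:
            # Stop accepting and processing work; nothing keeps us alive.
            self._queue.shutdown()

        async def _fetch(self, batch) -> None:
            ...
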
@@ -168,7 +168,6 @@ from synapse.metrics import (
events_processed_counter,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import (
@@ -232,6 +231,11 @@ WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5


class AbstractFederationSender(metaclass=abc.ABCMeta):
@abc.abstractmethod
def shutdown(self) -> None:
"""Stops this federation sender instance from sending further transactions."""
raise NotImplementedError()

@abc.abstractmethod
def notify_new_events(self, max_token: RoomStreamToken) -> None:
"""This gets called when we have some new events we might want to
@@ -326,6 +330,7 @@ class _DestinationWakeupQueue:
_MAX_TIME_IN_QUEUE = 30.0

sender: "FederationSender" = attr.ib()
hs: "HomeServer" = attr.ib()
server_name: str = attr.ib()
"""
Our homeserver name (used to label metrics) (`hs.hostname`).
@@ -453,18 +458,30 @@ class FederationSender(AbstractFederationSender):
1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
self._destination_wakeup_queue = _DestinationWakeupQueue(
self, self.server_name, self.clock, max_delay_s=rr_txn_interval_per_room_s
self,
hs,
self.server_name,
self.clock,
max_delay_s=rr_txn_interval_per_room_s,
)

# It is important for `_is_shutdown` to be instantiated before the looping call
# for `wake_destinations_needing_catchup`.
self._is_shutdown = False

# Regularly wake up destinations that have outstanding PDUs to be caught up
self.clock.looping_call_now(
run_as_background_process,
self.hs.run_as_background_process,
WAKEUP_RETRY_PERIOD_SEC * 1000.0,
"wake_destinations_needing_catchup",
self.server_name,
self._wake_destinations_needing_catchup,
)

def shutdown(self) -> None:
self._is_shutdown = True
for queue in self._per_destination_queues.values():
queue.shutdown()

def _get_per_destination_queue(
self, destination: str
) -> Optional[PerDestinationQueue]:
@@ -503,16 +520,15 @@ class FederationSender(AbstractFederationSender):
return

# fire off a processing loop in the background
run_as_background_process(
self.hs.run_as_background_process(
"process_event_queue_for_federation",
self.server_name,
self._process_event_queue_loop,
)

async def _process_event_queue_loop(self) -> None:
try:
self._is_processing = True
while True:
while not self._is_shutdown:
last_token = await self.store.get_federation_out_pos("events")
(
next_token,
@@ -1123,7 +1139,7 @@ class FederationSender(AbstractFederationSender):

last_processed: Optional[str] = None

while True:
while not self._is_shutdown:
destinations_to_wake = (
await self.store.get_catch_up_outstanding_destinations(last_processed)
)

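The `while True:` to `while not self._is_shutdown:` rewrites above follow a simple cooperative-cancellation pattern; distilled (hypothetical class):

    class ExampleLoop:
        def __init__(self) -> None:
            # Must be set before any looping call could fire.
            self._is_shutdown = False

        def shutdown(self) -> None:
            self._is_shutdown = True

        async def run(self) -> None:
            # Re-checking the flag each iteration lets shutdown() end the
            # loop at the next iteration boundary.
            while not self._is_shutdown:
                await self._do_one_pass()

        async def _do_one_pass(self) -> None:
            ...
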
@@ -28,6 +28,8 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tupl
import attr
from prometheus_client import Counter

from twisted.internet import defer

from synapse.api.constants import EduTypes
from synapse.api.errors import (
FederationDeniedError,
@@ -41,7 +43,6 @@ from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import SERVER_NAME_LABEL, sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.visibility import filter_events_for_server
@@ -79,6 +80,7 @@ MAX_PRESENCE_STATES_PER_EDU = 50
class PerDestinationQueue:
"""
Manages the per-destination transmission queues.
Runs until `shutdown()` is called on the queue.

Args:
hs
@@ -94,6 +96,7 @@ class PerDestinationQueue:
destination: str,
):
self.server_name = hs.hostname
self._hs = hs
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
self._store = hs.get_datastores().main
@@ -117,6 +120,8 @@ class PerDestinationQueue:

self._destination = destination
self.transmission_loop_running = False
self._transmission_loop_enabled = True
self.active_transmission_loop: Optional[defer.Deferred] = None

# Flag to signal to any running transmission loop that there is new data
# queued up to be sent.
@@ -171,6 +176,20 @@ class PerDestinationQueue:
def __str__(self) -> str:
return "PerDestinationQueue[%s]" % self._destination

def shutdown(self) -> None:
"""Instruct the queue to stop processing any further requests"""
self._transmission_loop_enabled = False
# The transaction manager must be shut down before cancelling the active
# transmission loop. Otherwise the transmission loop can enter a new cycle of
# sleeping before retrying since the shutdown flag of the _transaction_manager
# hasn't been set yet.
self._transaction_manager.shutdown()
try:
if self.active_transmission_loop is not None:
self.active_transmission_loop.cancel()
except Exception:
pass

def pending_pdu_count(self) -> int:
return len(self._pending_pdus)

@@ -309,11 +328,14 @@ class PerDestinationQueue:
)
return

if not self._transmission_loop_enabled:
logger.warning("Shutdown has been requested. Not sending transaction")
return

logger.debug("TX [%s] Starting transaction loop", self._destination)

run_as_background_process(
self.active_transmission_loop = self._hs.run_as_background_process(
"federation_transaction_transmission_loop",
self.server_name,
self._transaction_transmission_loop,
)

@@ -321,13 +343,13 @@ class PerDestinationQueue:
pending_pdus: List[EventBase] = []
try:
self.transmission_loop_running = True

# This will throw if we wouldn't retry. We do this here so we fail
# quickly, but we will later check this again in the http client,
# hence why we throw the result away.
await get_retry_limiter(
destination=self._destination,
our_server_name=self.server_name,
hs=self._hs,
clock=self._clock,
store=self._store,
)

@@ -339,7 +361,7 @@ class PerDestinationQueue:
# not caught up yet
return

while True:
while self._transmission_loop_enabled:
self._new_data_to_send = False

async with _TransactionQueueManager(self) as (
@@ -352,8 +374,8 @@ class PerDestinationQueue:
# If we've gotten told about new things to send during
# checking for things to send, we try looking again.
# Otherwise new PDUs or EDUs might arrive in the meantime,
# but not get sent because we hold the
# `transmission_loop_running` flag.
# but not get sent because we currently have an
# `_active_transmission_loop` running.
if self._new_data_to_send:
continue
else:

@@ -442,6 +464,7 @@ class PerDestinationQueue:
)
finally:
# We want to be *very* sure we clear this after we stop processing
self.active_transmission_loop = None
self.transmission_loop_running = False

async def _catch_up_transmission_loop(self) -> None:
@@ -469,7 +492,7 @@ class PerDestinationQueue:
last_successful_stream_ordering: int = _tmp_last_successful_stream_ordering

# get at most 50 catchup room/PDUs
while True:
while self._transmission_loop_enabled:
event_ids = await self._store.get_catch_up_room_event_ids(
self._destination, last_successful_stream_ordering
)

@@ -72,6 +72,12 @@ class TransactionManager:
# HACK to get unique tx id
self._next_txn_id = int(self.clock.time_msec())

self._is_shutdown = False

def shutdown(self) -> None:
self._is_shutdown = True
self._transport_layer.shutdown()

@measure_func("_send_new_transaction")
async def send_new_transaction(
self,
@@ -86,6 +92,12 @@ class TransactionManager:
edus: List of EDUs to send
"""

if self._is_shutdown:
logger.warning(
"TransactionManager has been shut down, not sending transaction"
)
return

# Make a transaction-sending opentracing span. This span follows on from
# all the edus in that transaction. This needs to be done since there is
# no active span here, so if the edus were not received by the remote the
|
||||
@@ -70,6 +70,9 @@ class TransportLayerClient:
|
||||
self.client = hs.get_federation_http_client()
|
||||
self._is_mine_server_name = hs.is_mine_server_name
|
||||
|
||||
def shutdown(self) -> None:
|
||||
self.client.shutdown()
|
||||
|
||||
async def get_room_state_ids(
|
||||
self, destination: str, room_id: str, event_id: str
|
||||
) -> JsonDict:
|
||||
|
||||
@@ -37,10 +37,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class AccountValidityHandler:
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.hs = hs
|
||||
self.server_name = (
|
||||
hs.hostname
|
||||
) # nb must be called this for @wrap_as_background_process
|
||||
self.hs = hs # nb must be called this for @wrap_as_background_process
|
||||
self.server_name = hs.hostname
|
||||
self.config = hs.config
|
||||
self.store = hs.get_datastores().main
|
||||
self.send_email_handler = hs.get_send_email_handler()
|
||||
|
||||
@@ -47,7 +47,6 @@ from synapse.metrics import (
    event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.storage.databases.main.directory import RoomAliasMapping
@@ -76,9 +75,8 @@ events_processed_counter = Counter(

class ApplicationServicesHandler:
    def __init__(self, hs: "HomeServer"):
        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self.server_name = hs.hostname
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self.store = hs.get_datastores().main
        self.is_mine_id = hs.is_mine_id
        self.appservice_api = hs.get_application_service_api()
@@ -171,8 +169,8 @@ class ApplicationServicesHandler:
                except Exception:
                    logger.error("Application Services Failure")

            run_as_background_process(
                "as_scheduler", self.server_name, start_scheduler
            self.hs.run_as_background_process(
                "as_scheduler", start_scheduler
            )
            self.started_scheduler = True

@@ -24,7 +24,6 @@ from typing import TYPE_CHECKING, Optional

from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.deactivate_account import (
    ReplicationNotifyAccountDeactivatedServlet,
)
@@ -272,8 +271,8 @@ class DeactivateAccountHandler:
        pending deactivation, if it isn't already running.
        """
        if not self._user_parter_running:
            run_as_background_process(
                "user_parter_loop", self.server_name, self._user_parter_loop
            self.hs.run_as_background_process(
                "user_parter_loop", self._user_parter_loop
            )

    async def _user_parter_loop(self) -> None:

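The recurring change across these handler hunks is mechanical: the free function `run_as_background_process(desc, server_name, func, ...)` becomes a method call `hs.run_as_background_process(desc, func, ...)`, with the homeserver supplying its own server name. A hedged sketch of how such a wrapper method might look (the actual `HomeServer` implementation is not part of this diff):

```python
from typing import Any, Awaitable, Callable


def run_as_background_process(
    desc: str,
    server_name: str,
    func: Callable[..., Awaitable[Any]],
    *args: Any,
    **kwargs: Any,
) -> Any:
    """Stand-in for the pre-existing module-level helper from
    synapse.metrics.background_process_metrics."""
    ...


class HomeServer:
    hostname: str

    def run_as_background_process(
        self,
        desc: str,
        func: Callable[..., Awaitable[Any]],
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        # The wrapper closes over the homeserver's hostname, so call sites no
        # longer need to thread `self.server_name` through every call.
        return run_as_background_process(desc, self.hostname, func, *args, **kwargs)
```
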
@@ -24,9 +24,6 @@ from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
)
from synapse.replication.http.delayed_events import (
    ReplicationAddedDelayedEventRestServlet,
)
@@ -58,6 +55,7 @@ logger = logging.getLogger(__name__)

class DelayedEventsHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.server_name = hs.hostname
        self._store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
@@ -94,7 +92,10 @@ class DelayedEventsHandler:
            hs.get_notifier().add_replication_callback(self.notify_new_event)
            # Kick off again (without blocking) to catch any missed notifications
            # that may have fired before the callback was added.
            self._clock.call_later(0, self.notify_new_event)
            self._clock.call_later(
                0,
                self.notify_new_event,
            )

            # Delayed events that are already marked as processed on startup might not have been
            # sent properly on the last run of the server, so unmark them to send them again.
@@ -112,15 +113,14 @@ class DelayedEventsHandler:
                    self._schedule_next_at(next_send_ts)

                # Can send the events in background after having awaited on marking them as processed
                run_as_background_process(
                self.hs.run_as_background_process(
                    "_send_events",
                    self.server_name,
                    self._send_events,
                    events,
                )

            self._initialized_from_db = run_as_background_process(
                "_schedule_db_events", self.server_name, _schedule_db_events
            self._initialized_from_db = self.hs.run_as_background_process(
                "_schedule_db_events", _schedule_db_events
            )
        else:
            self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)
@@ -145,9 +145,7 @@ class DelayedEventsHandler:
            finally:
                self._event_processing = False

        run_as_background_process(
            "delayed_events.notify_new_event", self.server_name, process
        )
        self.hs.run_as_background_process("delayed_events.notify_new_event", process)

    async def _unsafe_process_new_event(self) -> None:
        # We purposefully fetch the current max room stream ordering before
@@ -542,9 +540,8 @@ class DelayedEventsHandler:
        if self._next_delayed_event_call is None:
            self._next_delayed_event_call = self._clock.call_later(
                delay_sec,
                run_as_background_process,
                self.hs.run_as_background_process,
                "_send_on_timeout",
                self.server_name,
                self._send_on_timeout,
            )
        else:

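Note how `call_later` acts as a trampoline here: it takes the callable plus its positional arguments, so the diff can swap the free function for the bound method while dropping the `server_name` argument from the forwarded call. A runnable toy of the same shape (thread timers standing in for the Synapse `Clock`; names are illustrative):

```python
import threading
from typing import Any, Callable


def call_later(delay: float, f: Callable[..., Any], *args: Any) -> threading.Timer:
    """Toy stand-in for Clock.call_later: run f(*args) after `delay` seconds."""
    timer = threading.Timer(delay, f, args=args)
    timer.start()
    return timer


def run_as_background_process(desc: str, func: Callable[[], None]) -> None:
    # Stand-in for hs.run_as_background_process(desc, func).
    func()


def send_on_timeout() -> None:
    print("delayed events sent")


# Mirrors the hunk: the timer trampolines into the background-process wrapper,
# which then calls the real work function.
call_later(0.1, run_as_background_process, "_send_on_timeout", send_on_timeout)
```
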
@@ -47,7 +47,6 @@ from synapse.api.errors import (
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.http.devices import (
@@ -125,7 +124,7 @@ class DeviceHandler:
    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname  # nb must be called this for @measure_func
        self.clock = hs.get_clock()  # nb must be called this for @measure_func
        self.hs = hs
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self.store = cast("GenericWorkerStore", hs.get_datastores().main)
        self.notifier = hs.get_notifier()
        self.state = hs.get_state_handler()
@@ -191,10 +190,9 @@ class DeviceHandler:
            and self._delete_stale_devices_after is not None
        ):
            self.clock.looping_call(
                run_as_background_process,
                self.hs.run_as_background_process,
                DELETE_STALE_DEVICES_INTERVAL_MS,
                desc="delete_stale_devices",
                server_name=self.server_name,
                func=self._delete_stale_devices,
            )

@@ -963,10 +961,9 @@ class DeviceWriterHandler(DeviceHandler):

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.server_name = hs.hostname  # nb must be called this for @measure_func
        self.hs = hs  # nb must be called this for @wrap_as_background_process

        self.server_name = (
            hs.hostname
        )  # nb must be called this for @measure_func and @wrap_as_background_process
        # We only need to poke the federation sender explicitly if its on the
        # same instance. Other federation sender instances will get notified by
        # `synapse.app.generic_worker.FederationSenderHandler` when it sees it
@@ -1444,7 +1441,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
    def __init__(self, hs: "HomeServer", device_handler: DeviceWriterHandler):
        super().__init__(hs)

        self.server_name = hs.hostname
        self.hs = hs
        self.federation = hs.get_federation_client()
        self.server_name = hs.hostname  # nb must be called this for @measure_func
        self.clock = hs.get_clock()  # nb must be called this for @measure_func
@@ -1468,6 +1465,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
        self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache(
            cache_name="device_update_edu",
            server_name=self.server_name,
            hs=self.hs,
            clock=self.clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
@@ -1477,9 +1475,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
        # Attempt to resync out of sync device lists every 30s.
        self._resync_retry_lock = Lock()
        self.clock.looping_call(
            run_as_background_process,
            self.hs.run_as_background_process,
            30 * 1000,
            server_name=self.server_name,
            func=self._maybe_retry_device_resync,
            desc="_maybe_retry_device_resync",
        )
@@ -1599,9 +1596,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
        if resync:
            # We mark as stale up front in case we get restarted.
            await self.store.mark_remote_users_device_caches_as_stale([user_id])
            run_as_background_process(
            self.hs.run_as_background_process(
                "_maybe_retry_device_resync",
                self.server_name,
                self.multi_user_device_resync,
                [user_id],
                False,

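The `looping_call` sites use the same trampoline idea but with keyword arguments: `desc=` and `func=` are forwarded by name to the scheduled callable, which is why dropping `server_name=` is the whole change. A small sketch of the forwarding semantics (fired once rather than on a real interval, to stay self-contained):

```python
from typing import Any, Callable


def looping_call(f: Callable[..., Any], interval_ms: float, **kwargs: Any) -> None:
    """Sketch of the Clock.looping_call interface: invoke f(**kwargs) every
    interval_ms milliseconds. Here we fire it once to show the forwarding."""
    f(**kwargs)


def run_as_background_process(desc: str, func: Callable[[], None]) -> None:
    # Stand-in for hs.run_as_background_process: keyword arguments given to
    # looping_call arrive here by name, matching the desc=/func= style above.
    print(f"starting background process: {desc}")
    func()


looping_call(
    run_as_background_process,
    30 * 1000,
    desc="_maybe_retry_device_resync",
    func=lambda: print("retrying device resync"),
)
```
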
@@ -57,7 +57,6 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)


ONE_TIME_KEY_UPLOAD = "one_time_key_upload_lock"


@@ -847,14 +846,22 @@ class E2eKeysHandler:
        """
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            log_kv(
                {
                    "message": "Updating device_keys for user.",
                    "user_id": user_id,
                    "device_id": device_id,
                }
            )
            await self.upload_device_keys_for_user(
                user_id=user_id,
                device_id=device_id,
                keys={"device_keys": device_keys},
            )
        else:
            log_kv({"message": "Did not update device_keys", "reason": "not a dict"})

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
@@ -872,10 +879,9 @@ class E2eKeysHandler:
            log_kv(
                {"message": "Did not update one_time_keys", "reason": "no keys given"}
            )
        fallback_keys = keys.get("fallback_keys") or keys.get(
            "org.matrix.msc2732.fallback_keys"
        )
        if fallback_keys and isinstance(fallback_keys, dict):

        fallback_keys = keys.get("fallback_keys")
        if fallback_keys:
            log_kv(
                {
                    "message": "Updating fallback_keys for device.",
@@ -884,8 +890,6 @@ class E2eKeysHandler:
                }
            )
            await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys)
        elif fallback_keys:
            log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"})
        else:
            log_kv(
                {"message": "Did not update fallback_keys", "reason": "no keys given"}

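The fallback-key hunk drops the MSC2732-era field and reorganizes a three-way distinction: a truthy dict updates the store, a truthy non-dict is logged as "not a dict", and an absent value as "no keys given". The sketch below reproduces that branching in isolation (helper name and return strings are illustrative, standing in for the real store calls and `log_kv` lines):

```python
from typing import Any, Dict


def handle_fallback_keys(keys: Dict[str, Any]) -> str:
    """Sketch of the fallback-key branching; return values describe what the
    real handler would log or store."""
    fallback_keys = keys.get("fallback_keys")
    if fallback_keys and isinstance(fallback_keys, dict):
        return "stored fallback_keys"
    elif fallback_keys:
        return "did not update fallback_keys: not a dict"
    else:
        return "did not update fallback_keys: no keys given"


assert handle_fallback_keys({"fallback_keys": {"alg:id": "key"}}) == "stored fallback_keys"
assert handle_fallback_keys({"fallback_keys": "oops"}).endswith("not a dict")
assert handle_fallback_keys({}).endswith("no keys given")
```
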
@@ -72,7 +72,6 @@ from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.invite_rule import InviteRule
@@ -188,9 +187,8 @@ class FederationHandler:
        # any partial-state-resync operations which were in flight when we
        # were shut down.
        if not hs.config.worker.worker_app:
            run_as_background_process(
            self.hs.run_as_background_process(
                "resume_sync_partial_state_room",
                self.server_name,
                self._resume_partial_state_room_sync,
            )

@@ -318,9 +316,8 @@ class FederationHandler:
            logger.debug(
                "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
            )
            run_as_background_process(
            self.hs.run_as_background_process(
                "_maybe_backfill_inner_anyway_with_max_depth",
                self.server_name,
                self.maybe_backfill,
                room_id=room_id,
                # We use `MAX_DEPTH` so that we find all backfill points next
@@ -802,9 +799,8 @@ class FederationHandler:
            # lots of requests for missing prev_events which we do actually
            # have. Hence we fire off the background task, but don't wait for it.

            run_as_background_process(
            self.hs.run_as_background_process(
                "handle_queued_pdus",
                self.server_name,
                self._handle_queued_pdus,
                room_queue,
            )
@@ -1877,9 +1873,8 @@ class FederationHandler:
                room_id=room_id,
            )

        run_as_background_process(
        self.hs.run_as_background_process(
            desc="sync_partial_state_room",
            server_name=self.server_name,
            func=_sync_partial_state_room_wrapper,
        )

@@ -81,7 +81,6 @@ from synapse.logging.opentracing import (
    trace,
)
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.federation import (
    ReplicationFederationSendEventsRestServlet,
)
@@ -153,6 +152,7 @@ class FederationEventHandler:

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.hs = hs
        self._clock = hs.get_clock()
        self._store = hs.get_datastores().main
        self._state_store = hs.get_datastores().state
@@ -175,6 +175,7 @@ class FederationEventHandler:
        )
        self._notifier = hs.get_notifier()

        self._server_name = hs.hostname
        self._is_mine_id = hs.is_mine_id
        self._is_mine_server_name = hs.is_mine_server_name
        self._instance_name = hs.get_instance_name()
@@ -974,9 +975,8 @@ class FederationEventHandler:
        # Process previously failed backfill events in the background to not waste
        # time on something that is likely to fail again.
        if len(events_with_failed_pull_attempts) > 0:
            run_as_background_process(
            self.hs.run_as_background_process(
                "_process_new_pulled_events_with_failed_pull_attempts",
                self.server_name,
                _process_new_pulled_events,
                events_with_failed_pull_attempts,
            )
@@ -1568,9 +1568,8 @@ class FederationEventHandler:
            resync = True

        if resync:
            run_as_background_process(
            self.hs.run_as_background_process(
                "resync_device_due_to_pdu",
                self.server_name,
                self._resync_device,
                event.sender,
            )

@@ -67,7 +67,6 @@ from synapse.handlers.directory import DirectoryHandler
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
@@ -99,6 +98,7 @@ class MessageHandler:

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.hs = hs
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.state = hs.get_state_handler()
@@ -113,8 +113,8 @@ class MessageHandler:
        self._scheduled_expiry: Optional[IDelayedCall] = None

        if not hs.config.worker.worker_app:
            run_as_background_process(
                "_schedule_next_expiry", self.server_name, self._schedule_next_expiry
            self.hs.run_as_background_process(
                "_schedule_next_expiry", self._schedule_next_expiry
            )

    async def get_room_data(
@@ -444,9 +444,8 @@ class MessageHandler:

        self._scheduled_expiry = self.clock.call_later(
            delay,
            run_as_background_process,
            self.hs.run_as_background_process,
            "_expire_event",
            self.server_name,
            self._expire_event,
            event_id,
        )
@@ -548,9 +547,8 @@ class EventCreationHandler:
            and self.config.server.cleanup_extremities_with_dummy_events
        ):
            self.clock.looping_call(
                lambda: run_as_background_process(
                lambda: self.hs.run_as_background_process(
                    "send_dummy_events_to_fill_extremities",
                    self.server_name,
                    self._send_dummy_events_to_fill_extremities,
                ),
                5 * 60 * 1000,
@@ -570,6 +568,7 @@ class EventCreationHandler:
        self._external_cache_joined_hosts_updates = ExpiringCache(
            cache_name="_external_cache_joined_hosts_updates",
            server_name=self.server_name,
            hs=self.hs,
            clock=self.clock,
            expiry_ms=30 * 60 * 1000,
        )
@@ -2113,9 +2112,8 @@ class EventCreationHandler:
        if event.type == EventTypes.Message:
            # We don't want to block sending messages on any presence code. This
            # matters as sometimes presence code can take a while.
            run_as_background_process(
            self.hs.run_as_background_process(
                "bump_presence_active_time",
                self.server_name,
                self._bump_active_time,
                requester.user,
                requester.device_id,

@@ -29,7 +29,6 @@ from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams.config import PaginationConfig
from synapse.types import (
@@ -116,10 +115,9 @@ class PaginationHandler:
                logger.info("Setting up purge job with config: %s", job)

                self.clock.looping_call(
                    run_as_background_process,
                    self.hs.run_as_background_process,
                    job.interval,
                    "purge_history_for_rooms_in_range",
                    self.server_name,
                    self.purge_history_for_rooms_in_range,
                    job.shortest_max_lifetime,
                    job.longest_max_lifetime,
@@ -244,9 +242,8 @@ class PaginationHandler:
        # We want to purge everything, including local events, and to run the purge in
        # the background so that it's not blocking any other operation apart from
        # other purges in the same room.
        run_as_background_process(
        self.hs.run_as_background_process(
            PURGE_HISTORY_ACTION_NAME,
            self.server_name,
            self.purge_history,
            room_id,
            token,
@@ -604,9 +601,8 @@ class PaginationHandler:
            # Otherwise, we can backfill in the background for eventual
            # consistency's sake but we don't need to block the client waiting
            # for a costly federation call and processing.
            run_as_background_process(
            self.hs.run_as_background_process(
                "maybe_backfill_in_the_background",
                self.server_name,
                self.hs.get_federation_handler().maybe_backfill,
                room_id,
                curr_topo,

@@ -107,7 +107,6 @@ from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.http.presence import (
@@ -537,19 +536,15 @@ class WorkerPresenceHandler(BasePresenceHandler):
        self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
        self._set_state_client = ReplicationPresenceSetState.make_client(hs)

        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        hs.get_clock().add_system_event_trigger(
            "before",
            "shutdown",
            run_as_background_process,
            "generic_presence.on_shutdown",
            self.server_name,
            self._on_shutdown,
        self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS)

        hs.register_async_shutdown_handler(
            phase="before",
            eventType="shutdown",
            shutdown_func=self._on_shutdown,
        )

    @wrap_as_background_process("WorkerPresenceHandler._on_shutdown")
    async def _on_shutdown(self) -> None:
        if self._track_presence:
            self.hs.get_replication_command_handler().send_command(
@@ -779,9 +774,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
class PresenceHandler(BasePresenceHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self.server_name = hs.hostname
        self.wheel_timer: WheelTimer[str] = WheelTimer()
        self.notifier = hs.get_notifier()

@@ -842,13 +835,10 @@ class PresenceHandler(BasePresenceHandler):
        # have not yet been persisted
        self.unpersisted_users_changes: Set[str] = set()

        hs.get_clock().add_system_event_trigger(
            "before",
            "shutdown",
            run_as_background_process,
            "presence.on_shutdown",
            self.server_name,
            self._on_shutdown,
        hs.register_async_shutdown_handler(
            phase="before",
            eventType="shutdown",
            shutdown_func=self._on_shutdown,
        )

        # Keeps track of the number of *ongoing* syncs on this process. While
@@ -881,7 +871,10 @@ class PresenceHandler(BasePresenceHandler):
        # The initial delay is to allow disconnected clients a chance to
        # reconnect before we treat them as offline.
        self.clock.call_later(
            30, self.clock.looping_call, self._handle_timeouts, 5000
            30,
            self.clock.looping_call,
            self._handle_timeouts,
            5000,
        )

        # Presence information is persisted, whether or not it is being tracked
@@ -908,6 +901,7 @@ class PresenceHandler(BasePresenceHandler):
        self._event_pos = self.store.get_room_max_stream_ordering()
        self._event_processing = False

    @wrap_as_background_process("PresenceHandler._on_shutdown")
    async def _on_shutdown(self) -> None:
        """Gets called when shutting down. This lets us persist any updates that
        we haven't yet persisted, e.g. updates that only changes some internal
@@ -1539,8 +1533,8 @@ class PresenceHandler(BasePresenceHandler):
            finally:
                self._event_processing = False

        run_as_background_process(
            "presence.notify_new_event", self.server_name, _process_presence
        self.hs.run_as_background_process(
            "presence.notify_new_event", _process_presence
        )

    async def _unsafe_process(self) -> None:

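Both presence handlers drop the generic `add_system_event_trigger("before", "shutdown", ...)` wiring in favour of a dedicated `hs.register_async_shutdown_handler(phase=..., eventType=..., shutdown_func=...)`. A sketch of what such a registry might do (this is an assumption about the helper's shape, not code from the diff):

```python
import asyncio
from typing import Awaitable, Callable, Dict, List, Tuple


class ShutdownRegistry:
    """Illustrative registry for async shutdown hooks, keyed by phase/event."""

    def __init__(self) -> None:
        self._handlers: Dict[Tuple[str, str], List[Callable[[], Awaitable[None]]]] = {}

    def register_async_shutdown_handler(
        self, *, phase: str, eventType: str, shutdown_func: Callable[[], Awaitable[None]]
    ) -> None:
        self._handlers.setdefault((phase, eventType), []).append(shutdown_func)

    async def fire(self, phase: str, eventType: str) -> None:
        # Run every registered hook; in Synapse these would presumably also be
        # wrapped as measured background processes.
        for func in self._handlers.get((phase, eventType), []):
            await func()


async def _demo() -> None:
    registry = ShutdownRegistry()

    async def on_shutdown() -> None:
        print("persisting unwritten presence updates")

    registry.register_async_shutdown_handler(
        phase="before", eventType="shutdown", shutdown_func=on_shutdown
    )
    await registry.fire("before", "shutdown")


asyncio.run(_demo())
```
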
@@ -56,8 +56,8 @@ class ProfileHandler:

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname  # nb must be called this for @cached
        self.clock = hs.get_clock()  # nb must be called this for @cached
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()
        self.hs = hs

        self.federation = hs.get_federation_client()

@@ -23,7 +23,14 @@
"""Contains functions for registering clients."""

import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, TypedDict
from typing import (
    TYPE_CHECKING,
    Iterable,
    List,
    Optional,
    Tuple,
    TypedDict,
)

from prometheus_client import Counter

@@ -50,7 +50,6 @@ from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.push import ReplicationCopyPusherRestServlet
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.invite_rule import InviteRule
@@ -2190,7 +2189,10 @@ class RoomForgetterHandler(StateDeltasHandler):
        self._notifier.add_replication_callback(self.notify_new_event)

        # We kick this off to pick up outstanding work from before the last restart.
        self._clock.call_later(0, self.notify_new_event)
        self._clock.call_later(
            0,
            self.notify_new_event,
        )

    def notify_new_event(self) -> None:
        """Called when there may be more deltas to process"""
@@ -2205,9 +2207,7 @@ class RoomForgetterHandler(StateDeltasHandler):
            finally:
                self._is_processing = False

        run_as_background_process(
            "room_forgetter.notify_new_event", self.server_name, process
        )
        self._hs.run_as_background_process("room_forgetter.notify_new_event", process)

    async def _unsafe_process(self) -> None:
        # If self.pos is None then means we haven't fetched it from DB

@@ -224,7 +224,7 @@ class SsoHandler:
        )

        # a lock on the mappings
        self._mapping_lock = Linearizer(name="sso_user_mapping", clock=hs.get_clock())
        self._mapping_lock = Linearizer(clock=hs.get_clock(), name="sso_user_mapping")

        # a map from session id to session data
        self._username_mapping_sessions: Dict[str, UsernameMappingSession] = {}

@@ -33,7 +33,6 @@ from typing import (

from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.types import JsonDict
from synapse.util.events import get_plain_text_topic_from_event_content
@@ -75,7 +74,10 @@ class StatsHandler:

        # We kick this off so that we don't have to wait for a change before
        # we start populating stats
        self.clock.call_later(0, self.notify_new_event)
        self.clock.call_later(
            0,
            self.notify_new_event,
        )

    def notify_new_event(self) -> None:
        """Called when there may be more deltas to process"""
@@ -90,7 +92,7 @@ class StatsHandler:
            finally:
                self._is_processing = False

        run_as_background_process("stats.notify_new_event", self.server_name, process)
        self.hs.run_as_background_process("stats.notify_new_event", process)

    async def _unsafe_process(self) -> None:
        # If self.pos is None then means we haven't fetched it from DB

@@ -323,6 +323,7 @@ class SyncHandler:
        ] = ExpiringCache(
            cache_name="lazy_loaded_members_cache",
            server_name=self.server_name,
            hs=hs,
            clock=self.clock,
            max_len=0,
            expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
@@ -982,6 +983,7 @@ class SyncHandler:
            logger.debug("creating LruCache for %r", cache_key)
            cache = LruCache(
                max_size=LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE,
                clock=self.clock,
                server_name=self.server_name,
            )
            self.lazy_loaded_members_cache[cache_key] = cache

@@ -28,7 +28,6 @@ from synapse.api.constants import EduTypes
from synapse.api.errors import AuthError, ShadowBanError, SynapseError
from synapse.appservice import ApplicationService
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.tcp.streams import TypingStream
@@ -78,11 +77,10 @@ class FollowerTypingHandler:
    """

    def __init__(self, hs: "HomeServer"):
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self.store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id
        self.is_mine_server_name = hs.is_mine_server_name
@@ -144,9 +142,8 @@ class FollowerTypingHandler:
        if self.federation and self.is_mine_id(member.user_id):
            last_fed_poke = self._member_last_federation_poke.get(member, None)
            if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now:
                run_as_background_process(
                self.hs.run_as_background_process(
                    "typing._push_remote",
                    self.server_name,
                    self._push_remote,
                    member=member,
                    typing=True,
@@ -220,9 +217,8 @@ class FollowerTypingHandler:
            self._rooms_updated.add(row.room_id)

        if self.federation:
            run_as_background_process(
            self.hs.run_as_background_process(
                "_send_changes_in_typing_to_remotes",
                self.server_name,
                self._send_changes_in_typing_to_remotes,
                row.room_id,
                prev_typing,
@@ -384,9 +380,8 @@ class TypingWriterHandler(FollowerTypingHandler):
    def _push_update(self, member: RoomMember, typing: bool) -> None:
        if self.hs.is_mine_id(member.user_id):
            # Only send updates for changes to our own users.
            run_as_background_process(
            self.hs.run_as_background_process(
                "typing._push_remote",
                self.server_name,
                self._push_remote,
                member,
                typing,

@@ -36,7 +36,6 @@ from synapse.api.constants import (
from synapse.api.errors import Codes, SynapseError
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.databases.main.user_directory import SearchResult
from synapse.storage.roommember import ProfileInfo
@@ -137,11 +136,15 @@ class UserDirectoryHandler(StateDeltasHandler):

        # We kick this off so that we don't have to wait for a change before
        # we start populating the user directory
        self.clock.call_later(0, self.notify_new_event)
        self.clock.call_later(
            0,
            self.notify_new_event,
        )

        # Kick off the profile refresh process on startup
        self._refresh_remote_profiles_call_later = self.clock.call_later(
            10, self.kick_off_remote_profile_refresh_process
            10,
            self.kick_off_remote_profile_refresh_process,
        )

    async def search_users(
@@ -193,9 +196,7 @@ class UserDirectoryHandler(StateDeltasHandler):
                self._is_processing = False

        self._is_processing = True
        run_as_background_process(
            "user_directory.notify_new_event", self.server_name, process
        )
        self._hs.run_as_background_process("user_directory.notify_new_event", process)

    async def handle_local_profile_change(
        self, user_id: str, profile: ProfileInfo
@@ -609,8 +610,8 @@ class UserDirectoryHandler(StateDeltasHandler):
            self._is_refreshing_remote_profiles = False

        self._is_refreshing_remote_profiles = True
        run_as_background_process(
            "user_directory.refresh_remote_profiles", self.server_name, process
        self._hs.run_as_background_process(
            "user_directory.refresh_remote_profiles", process
        )

    async def _unsafe_refresh_remote_profiles(self) -> None:
@@ -655,8 +656,9 @@ class UserDirectoryHandler(StateDeltasHandler):
        if not users:
            return
        _, _, next_try_at_ts = users[0]
        delay = ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2
        self._refresh_remote_profiles_call_later = self.clock.call_later(
            ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2,
            delay,
            self.kick_off_remote_profile_refresh_process,
        )

@@ -692,9 +694,8 @@ class UserDirectoryHandler(StateDeltasHandler):
            self._is_refreshing_remote_profiles_for_servers.remove(server_name)

        self._is_refreshing_remote_profiles_for_servers.add(server_name)
        run_as_background_process(
        self._hs.run_as_background_process(
            "user_directory.refresh_remote_profiles_for_remote_server",
            self.server_name,
            process,
        )

@@ -37,13 +37,13 @@ from weakref import WeakSet
import attr

from twisted.internet import defer
from twisted.internet.interfaces import IReactorTime

from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import start_active_span
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.databases.main.lock import Lock, LockStore
from synapse.util.async_helpers import timeout_deferred
from synapse.util.clock import Clock
from synapse.util.constants import ONE_MINUTE_SECONDS

if TYPE_CHECKING:
@@ -66,10 +66,8 @@ class WorkerLocksHandler:
    """

    def __init__(self, hs: "HomeServer") -> None:
        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self._reactor = hs.get_reactor()
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self._clock = hs.get_clock()
        self._store = hs.get_datastores().main
        self._clock = hs.get_clock()
        self._notifier = hs.get_notifier()
@@ -98,7 +96,7 @@ class WorkerLocksHandler:
        """

        lock = WaitingLock(
            reactor=self._reactor,
            clock=self._clock,
            store=self._store,
            handler=self,
            lock_name=lock_name,
@@ -129,7 +127,7 @@ class WorkerLocksHandler:
        """

        lock = WaitingLock(
            reactor=self._reactor,
            clock=self._clock,
            store=self._store,
            handler=self,
            lock_name=lock_name,
@@ -160,7 +158,7 @@ class WorkerLocksHandler:
        lock = WaitingMultiLock(
            lock_names=lock_names,
            write=write,
            reactor=self._reactor,
            clock=self._clock,
            store=self._store,
            handler=self,
        )
@@ -197,7 +195,11 @@ class WorkerLocksHandler:
            if not deferred.called:
                deferred.callback(None)

        self._clock.call_later(0, _wake_all_locks, locks)
        self._clock.call_later(
            0,
            _wake_all_locks,
            locks,
        )

    @wrap_as_background_process("_cleanup_locks")
    async def _cleanup_locks(self) -> None:
@@ -207,7 +209,7 @@ class WorkerLocksHandler:

@attr.s(auto_attribs=True, eq=False)
class WaitingLock:
    reactor: IReactorTime
    clock: Clock
    store: LockStore
    handler: WorkerLocksHandler
    lock_name: str
@@ -246,10 +248,11 @@ class WaitingLock:
                # periodically wake up in case the lock was released but we
                # weren't notified.
                with PreserveLoggingContext():
                    timeout = self._get_next_retry_interval()
                    await timeout_deferred(
                        deferred=self.deferred,
                        timeout=self._get_next_retry_interval(),
                        reactor=self.reactor,
                        timeout=timeout,
                        clock=self.clock,
                    )
            except Exception:
                pass
@@ -290,7 +293,7 @@ class WaitingMultiLock:

    write: bool

    reactor: IReactorTime
    clock: Clock
    store: LockStore
    handler: WorkerLocksHandler

@@ -323,10 +326,11 @@ class WaitingMultiLock:
                # periodically wake up in case the lock was released but we
                # weren't notified.
                with PreserveLoggingContext():
                    timeout = self._get_next_retry_interval()
                    await timeout_deferred(
                        deferred=self.deferred,
                        timeout=self._get_next_retry_interval(),
                        reactor=self.reactor,
                        timeout=timeout,
                        clock=self.clock,
                    )
            except Exception:
                pass

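Every `timeout_deferred` call site in this changeset moves from positional `(deferred, timeout, reactor)` to keyword arguments, with `clock=` replacing `reactor=`. A minimal sketch of a timeout helper with that keyword-only shape (the real Synapse helper also cancels the underlying deferred and handles logcontexts; this toy leans on asyncio's own timer):

```python
import asyncio
from typing import Any, Awaitable


async def timeout_deferred(
    *, deferred: Awaitable[Any], timeout: float, clock: Any = None
) -> Any:
    """Sketch: fail `deferred` if it does not resolve within `timeout` seconds.

    `clock` is accepted only to mirror the new keyword-only style; this toy
    version uses asyncio's timer rather than a Synapse Clock.
    """
    return await asyncio.wait_for(asyncio.ensure_future(deferred), timeout)


async def _demo() -> None:
    async def slow() -> str:
        await asyncio.sleep(0.01)
        return "ok"

    result = await timeout_deferred(deferred=slow(), timeout=1.0, clock=None)
    print(result)


asyncio.run(_demo())
```

Keyword-only arguments make the many mechanical call-site rewrites in this diff hard to get wrong, since a stray positional reactor would now fail loudly.
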
@@ -54,7 +54,6 @@ from twisted.internet.interfaces import (
    IOpenSSLContextFactory,
    IReactorCore,
    IReactorPluggableNameResolver,
    IReactorTime,
    IResolutionReceiver,
    ITCPTransport,
)
@@ -88,6 +87,7 @@ from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import ISynapseReactor, StrSequence
from synapse.util.async_helpers import timeout_deferred
from synapse.util.clock import Clock
from synapse.util.json import json_decoder

if TYPE_CHECKING:
@@ -165,16 +165,17 @@ def _is_ip_blocked(
_EPSILON = 0.00000001


def _make_scheduler(
    reactor: IReactorTime,
) -> Callable[[Callable[[], object]], IDelayedCall]:
def _make_scheduler(clock: Clock) -> Callable[[Callable[[], object]], IDelayedCall]:
    """Makes a schedular suitable for a Cooperator using the given reactor.

    (This is effectively just a copy from `twisted.internet.task`)
    """

    def _scheduler(x: Callable[[], object]) -> IDelayedCall:
        return reactor.callLater(_EPSILON, x)
        return clock.call_later(
            _EPSILON,
            x,
        )

    return _scheduler

@@ -367,7 +368,7 @@ class BaseHttpClient:

        # We use this for our body producers to ensure that they use the correct
        # reactor.
        self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor()))
        self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_clock()))

    async def request(
        self,
@@ -436,9 +437,9 @@ class BaseHttpClient:
            # we use our own timeout mechanism rather than treq's as a workaround
            # for https://twistedmatrix.com/trac/ticket/9534.
            request_deferred = timeout_deferred(
                request_deferred,
                60,
                self.hs.get_reactor(),
                deferred=request_deferred,
                timeout=60,
                clock=self.hs.get_clock(),
            )

            # turn timeouts into RequestTimedOutErrors
@@ -763,7 +764,11 @@ class BaseHttpClient:
            d = read_body_with_max_size(response, output_stream, max_size)

            # Ensure that the body is not read forever.
            d = timeout_deferred(d, 30, self.hs.get_reactor())
            d = timeout_deferred(
                deferred=d,
                timeout=30,
                clock=self.hs.get_clock(),
            )

            length = await make_deferred_yieldable(d)
        except BodyExceededMaxSize:
@@ -957,9 +962,9 @@ class ReplicationClient(BaseHttpClient):
            # for https://twistedmatrix.com/trac/ticket/9534.
            # (Updated url https://github.com/twisted/twisted/issues/9534)
            request_deferred = timeout_deferred(
                request_deferred,
                60,
                self.hs.get_reactor(),
                deferred=request_deferred,
                timeout=60,
                clock=self.hs.get_clock(),
            )

            # turn timeouts into RequestTimedOutErrors

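`_make_scheduler` now builds the Cooperator's scheduler on top of `clock.call_later` rather than `reactor.callLater`, so the scheduled wakeups are tracked by the homeserver clock. A toy equivalent of the closure shape (a thread timer standing in for the Synapse Clock):

```python
import threading
from typing import Any, Callable

_EPSILON = 0.00000001


class ToyClock:
    """Stand-in for synapse.util.clock.Clock, exposing only call_later."""

    def call_later(
        self, delay: float, f: Callable[..., Any], *args: Any
    ) -> threading.Timer:
        timer = threading.Timer(delay, f, args=args)
        timer.start()
        return timer


def make_scheduler(clock: ToyClock) -> Callable[[Callable[[], object]], threading.Timer]:
    # Mirrors the diff: the returned scheduler queues each unit of cooperative
    # work a hair's breadth into the future via the clock.
    def _scheduler(x: Callable[[], object]) -> threading.Timer:
        return clock.call_later(_EPSILON, x)

    return _scheduler


schedule = make_scheduler(ToyClock())
schedule(lambda: print("cooperative task ran"))
```
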
@@ -67,6 +67,9 @@ class MatrixFederationAgent:
    Args:
        reactor: twisted reactor to use for underlying requests

        clock: Internal `HomeServer` clock used to track delayed and looping calls.
            Should be obtained from `hs.get_clock()`.

        tls_client_options_factory:
            factory to use for fetching client tls options, or none to disable TLS.

@@ -97,6 +100,7 @@ class MatrixFederationAgent:
        *,
        server_name: str,
        reactor: ISynapseReactor,
        clock: Clock,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        user_agent: bytes,
        ip_allowlist: Optional[IPSet],
@@ -109,6 +113,7 @@ class MatrixFederationAgent:
        Args:
            server_name: Our homeserver name (used to label metrics) (`hs.hostname`).
            reactor
            clock: Should be the `hs` clock from `hs.get_clock()`
            tls_client_options_factory
            user_agent
            ip_allowlist
@@ -124,7 +129,6 @@ class MatrixFederationAgent:
        # addresses, to prevent DNS rebinding.
        reactor = BlocklistingReactorWrapper(reactor, ip_allowlist, ip_blocklist)

        self._clock = Clock(reactor, server_name=server_name)
        self._pool = HTTPConnectionPool(reactor)
        self._pool.retryAutomatically = False
        self._pool.maxPersistentPerHost = 5
@@ -147,6 +151,7 @@ class MatrixFederationAgent:
        _well_known_resolver = WellKnownResolver(
            server_name=server_name,
            reactor=reactor,
            clock=clock,
            agent=BlocklistingAgentWrapper(
                ProxyAgent(
                    reactor=reactor,

@@ -90,6 +90,7 @@ class WellKnownResolver:
        self,
        server_name: str,
        reactor: ISynapseThreadlessReactor,
        clock: Clock,
        agent: IAgent,
        user_agent: bytes,
        well_known_cache: Optional[TTLCache[bytes, Optional[bytes]]] = None,
@@ -99,6 +100,7 @@ class WellKnownResolver:
        Args:
            server_name: Our homeserver name (used to label metrics) (`hs.hostname`).
            reactor
            clock: Should be the `hs` clock from `hs.get_clock()`
            agent
            user_agent
            well_known_cache
@@ -107,7 +109,7 @@ class WellKnownResolver:

        self.server_name = server_name
        self._reactor = reactor
        self._clock = Clock(reactor, server_name=server_name)
        self._clock = clock

        if well_known_cache is None:
            well_known_cache = TTLCache(

@@ -90,6 +90,7 @@ from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.metrics import SERVER_NAME_LABEL
from synapse.types import JsonDict
from synapse.util.async_helpers import AwakenableSleeper, Linearizer, timeout_deferred
from synapse.util.clock import Clock
from synapse.util.json import json_decoder
from synapse.util.metrics import Measure
from synapse.util.stringutils import parse_and_validate_server_name
@@ -270,6 +271,7 @@ class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]):


async def _handle_response(
    clock: Clock,
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
@@ -299,7 +301,11 @@ async def _handle_response(
        check_content_type_is(response.headers, parser.CONTENT_TYPE)

        d = read_body_with_max_size(response, parser, max_response_size)
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
        d = timeout_deferred(
            deferred=d,
            timeout=timeout_sec,
            clock=clock,
        )

        length = await make_deferred_yieldable(d)

@@ -411,6 +417,7 @@ class MatrixFederationHttpClient:
        self.server_name = hs.hostname

        self.reactor = hs.get_reactor()
        self.clock = hs.get_clock()

        user_agent = hs.version_string
        if hs.config.server.user_agent_suffix:
@@ -424,6 +431,7 @@ class MatrixFederationHttpClient:
        federation_agent: IAgent = MatrixFederationAgent(
            server_name=self.server_name,
            reactor=self.reactor,
            clock=self.clock,
            tls_client_options_factory=tls_client_options_factory,
            user_agent=user_agent.encode("ascii"),
            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
@@ -457,7 +465,6 @@ class MatrixFederationHttpClient:
            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
        )

        self.clock = hs.get_clock()
        self._store = hs.get_datastores().main
        self.version_string_bytes = hs.version_string.encode("ascii")
        self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000
@@ -470,9 +477,9 @@ class MatrixFederationHttpClient:
        self.max_long_retries = hs.config.federation.max_long_retries
        self.max_short_retries = hs.config.federation.max_short_retries

        self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor))
        self._cooperator = Cooperator(scheduler=_make_scheduler(self.clock))

        self._sleeper = AwakenableSleeper(self.reactor)
        self._sleeper = AwakenableSleeper(self.clock)

        self._simple_http_client = SimpleHttpClient(
            hs,
@@ -484,6 +491,10 @@ class MatrixFederationHttpClient:
        self.remote_download_linearizer = Linearizer(
            name="remote_download_linearizer", max_count=6, clock=self.clock
        )
        self._is_shutdown = False

    def shutdown(self) -> None:
        self._is_shutdown = True

    def wake_destination(self, destination: str) -> None:
        """Called when the remote server may have come back online."""
@@ -629,6 +640,7 @@ class MatrixFederationHttpClient:
        limiter = await synapse.util.retryutils.get_retry_limiter(
            destination=request.destination,
            our_server_name=self.server_name,
            hs=self.hs,
            clock=self.clock,
            store=self._store,
            backoff_on_404=backoff_on_404,
@@ -675,7 +687,7 @@ class MatrixFederationHttpClient:
                (b"", b"", path_bytes, None, query_bytes, b"")
            )

            while True:
            while not self._is_shutdown:
                try:
                    json = request.get_json()
                    if json:
@@ -733,9 +745,9 @@ class MatrixFederationHttpClient:
                        bodyProducer=producer,
                    )
                    request_deferred = timeout_deferred(
                        request_deferred,
                        deferred=request_deferred,
                        timeout=_sec_timeout,
                        reactor=self.reactor,
                        clock=self.clock,
                    )

                    response = await make_deferred_yieldable(request_deferred)
@@ -793,7 +805,9 @@ class MatrixFederationHttpClient:
                        # Update transactions table?
                        d = treq.content(response)
                        d = timeout_deferred(
                            d, timeout=_sec_timeout, reactor=self.reactor
                            deferred=d,
                            timeout=_sec_timeout,
                            clock=self.clock,
                        )

                        try:
@@ -862,6 +876,15 @@ class MatrixFederationHttpClient:
                        delay_seconds,
                    )

                    if self._is_shutdown:
                        # Immediately fail sending the request instead of starting a
                        # potentially long sleep after the server has requested
                        # shutdown.
                        # This is the code path followed when the
                        # `federation_transaction_transmission_loop` has been
                        # cancelled.
                        raise

                    # Sleep for the calculated delay, or wake up immediately
                    # if we get notified that the server is back up.
                    await self._sleeper.sleep(
@@ -1074,6 +1097,7 @@ class MatrixFederationHttpClient:
            parser = cast(ByteParser[T], JsonParser())

        body = await _handle_response(
            self.clock,
            self.reactor,
            _sec_timeout,
            request,
@@ -1152,7 +1176,13 @@ class MatrixFederationHttpClient:
            _sec_timeout = self.default_timeout_seconds

        body = await _handle_response(
            self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
            self.clock,
            self.reactor,
            _sec_timeout,
            request,
            response,
            start_ms,
            parser=JsonParser(),
        )
        return body

@@ -1358,6 +1388,7 @@ class MatrixFederationHttpClient:
            parser = cast(ByteParser[T], JsonParser())

        body = await _handle_response(
            self.clock,
            self.reactor,
            _sec_timeout,
            request,
@@ -1431,7 +1462,13 @@ class MatrixFederationHttpClient:
            _sec_timeout = self.default_timeout_seconds

        body = await _handle_response(
            self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
            self.clock,
            self.reactor,
            _sec_timeout,
            request,
            response,
            start_ms,
            parser=JsonParser(),
        )
        return body

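In the federation client, the retry loop's `while True:` becomes `while not self._is_shutdown:`, and the backoff path re-raises instead of sleeping once shutdown has been requested. A compact sketch of that shape (retry and backoff details simplified; class and method names are illustrative):

```python
import asyncio


class FederationClient:
    """Illustrative request loop that refuses to sleep through a shutdown."""

    def __init__(self) -> None:
        self._is_shutdown = False

    def shutdown(self) -> None:
        self._is_shutdown = True

    async def send_with_retries(self, attempts: int = 3) -> None:
        retries_left = attempts
        while not self._is_shutdown:
            try:
                await self._send_once()
                return
            except ConnectionError:
                retries_left -= 1
                if retries_left <= 0:
                    raise
                if self._is_shutdown:
                    # Fail immediately rather than starting a potentially
                    # long backoff sleep after shutdown was requested.
                    raise
                await asyncio.sleep(0.1)  # stand-in for the backoff sleeper

    async def _send_once(self) -> None:
        raise ConnectionError("remote unreachable")
```
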
@@ -161,12 +161,12 @@ class ProxyResource(_AsyncResource):
            bodyProducer=QuieterFileBodyProducer(request.content),
        )
        request_deferred = timeout_deferred(
            request_deferred,
            deferred=request_deferred,
            # This should be set longer than the timeout in `MatrixFederationHttpClient`
            # so that it has enough time to complete and pass us the data before we give
            # up.
            timeout=90,
            reactor=self.reactor,
            clock=self._clock,
        )

        response = await make_deferred_yieldable(request_deferred)

@@ -420,7 +420,14 @@ class DirectServeJsonResource(_AsyncResource):
        """

        if clock is None:
            clock = Clock(
            # Ideally we wouldn't ignore the linter error here and instead enforce a
            # required `Clock` be passed into the `__init__` function.
            # However, this would change the function signature which is currently being
            # exported to the module api. Since we don't want to break that api, we have
            # to settle with ignoring the linter error here.
            # As of the time of writing this, all Synapse internal usages of
            # `DirectServeJsonResource` pass in the existing homeserver clock instance.
            clock = Clock(  # type: ignore[multiple-internal-clocks]
                cast(ISynapseThreadlessReactor, reactor),
                server_name="synapse_module_running_from_unknown_server",
            )
@@ -608,7 +615,14 @@ class DirectServeHtmlResource(_AsyncResource):
            Only optional for the Module API.
        """
        if clock is None:
            clock = Clock(
            # Ideally we wouldn't ignore the linter error here and instead enforce a
            # required `Clock` be passed into the `__init__` function.
            # However, this would change the function signature which is currently being
            # exported to the module api. Since we don't want to break that api, we have
            # to settle with ignoring the linter error here.
            # As of the time of writing this, all Synapse internal usages of
            # `DirectServeHtmlResource` pass in the existing homeserver clock instance.
            clock = Clock(  # type: ignore[multiple-internal-clocks]
                cast(ISynapseThreadlessReactor, reactor),
                server_name="synapse_module_running_from_unknown_server",
            )

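Both `DirectServe*Resource` constructors keep `clock` optional for module-API compatibility and fall back to constructing a private `Clock`, suppressing the internal lint that otherwise forbids a second clock. The pattern in isolation looks roughly like this (lint name and `Clock` signature as shown in the hunks; everything else is scaffolding):

```python
from typing import Optional


class Clock:
    """Stand-in for synapse.util.clock.Clock."""

    def __init__(self, reactor: object, *, server_name: str) -> None:
        self.server_name = server_name


class DirectServeResource:
    def __init__(self, reactor: object, clock: Optional[Clock] = None) -> None:
        if clock is None:
            # Fallback for module-API callers that predate the `clock`
            # parameter; internal callers always pass the homeserver clock.
            clock = Clock(
                reactor, server_name="synapse_module_running_from_unknown_server"
            )
        self._clock = clock
```
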
@@ -22,7 +22,7 @@ import contextlib
import logging
import time
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
from typing import TYPE_CHECKING, Any, Generator, List, Optional, Tuple, Union

import attr
from zope.interface import implementer
@@ -30,6 +30,7 @@ from zope.interface import implementer
from twisted.internet.address import UNIXAddress
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IAddress
from twisted.internet.protocol import Protocol
from twisted.python.failure import Failure
from twisted.web.http import HTTPChannel
from twisted.web.resource import IResource, Resource
@@ -660,6 +661,70 @@ class _XForwardedForAddress:
    host: str


class SynapseProtocol(HTTPChannel):
    """
    Synapse-specific twisted http Protocol.

    This is a small wrapper around the twisted HTTPChannel so we can track active
    connections in order to close any outstanding connections on shutdown.
    """

    def __init__(
        self,
        site: "SynapseSite",
        our_server_name: str,
        max_request_body_size: int,
        request_id_header: Optional[str],
        request_class: type,
    ):
        super().__init__()
        self.factory: SynapseSite = site
        self.site = site
        self.our_server_name = our_server_name
        self.max_request_body_size = max_request_body_size
        self.request_id_header = request_id_header
        self.request_class = request_class

    def connectionMade(self) -> None:
        """
        Called when a connection is made.

        This may be considered the initializer of the protocol, because
        it is called when the connection is completed.

        Add the connection to the factory's connection list when it's established.
        """
        super().connectionMade()
        self.factory.addConnection(self)

    def connectionLost(self, reason: Failure) -> None:  # type: ignore[override]
        """
        Called when the connection is shut down.

        Clear any circular references here, and any external references to this
        Protocol. The connection has been closed. In our case, we need to remove the
        connection from the factory's connection list, when it's lost.
        """
        super().connectionLost(reason)
        self.factory.removeConnection(self)

    def requestFactory(self, http_channel: HTTPChannel, queued: bool) -> SynapseRequest:  # type: ignore[override]
        """
        A callable used to build `twisted.web.iweb.IRequest` objects.

        Use our own custom SynapseRequest type instead of the regular
        twisted.web.server.Request.
        """
        return self.request_class(
            self,
            self.factory,
            our_server_name=self.our_server_name,
            max_request_body_size=self.max_request_body_size,
            queued=queued,
            request_id_header=self.request_id_header,
        )


class SynapseSite(ProxySite):
    """
    Synapse-specific twisted http Site
@@ -710,23 +775,44 @@ class SynapseSite(ProxySite):

        assert config.http_options is not None
        proxied = config.http_options.x_forwarded
        request_class = XForwardedForRequest if proxied else SynapseRequest
        self.request_class = XForwardedForRequest if proxied else SynapseRequest

        request_id_header = config.http_options.request_id_header
        self.request_id_header = config.http_options.request_id_header
        self.max_request_body_size = max_request_body_size

        def request_factory(channel: HTTPChannel, queued: bool) -> Request:
            return request_class(
                channel,
                self,
                our_server_name=self.server_name,
                max_request_body_size=max_request_body_size,
                queued=queued,
                request_id_header=request_id_header,
            )

        self.requestFactory = request_factory  # type: ignore
        self.access_logger = logging.getLogger(logger_name)
        self.server_version_string = server_version_string.encode("ascii")
        self.connections: List[Protocol] = []

    def buildProtocol(self, addr: IAddress) -> SynapseProtocol:
        protocol = SynapseProtocol(
            self,
            self.server_name,
            self.max_request_body_size,
            self.request_id_header,
            self.request_class,
        )
        return protocol

    def addConnection(self, protocol: Protocol) -> None:
        self.connections.append(protocol)

    def removeConnection(self, protocol: Protocol) -> None:
        if protocol in self.connections:
            self.connections.remove(protocol)

    def stopFactory(self) -> None:
        super().stopFactory()

        # Shutdown any connections which are still active.
        # These can be long lived HTTP connections which wouldn't normally be closed
        # when calling `shutdown` on the respective `Port`.
        # Closing the connections here is required for us to fully shutdown the
        # `SynapseHomeServer` in order for it to be garbage collected.
        for protocol in self.connections[:]:
            if protocol.transport is not None:
                protocol.transport.loseConnection()
        self.connections.clear()

    def log(self, request: SynapseRequest) -> None:  # type: ignore[override]
        pass

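The new `SynapseProtocol`/`SynapseSite` pairing is a standard Twisted factory pattern: the protocol registers itself with the factory on `connectionMade`, deregisters on `connectionLost`, and `stopFactory` force-closes whatever is left so the homeserver can be garbage collected. The same bookkeeping in miniature (plain classes rather than Twisted, to keep the sketch self-contained):

```python
from typing import List


class Connection:
    """Stand-in for a protocol instance with a closable transport."""

    def __init__(self, factory: "Site") -> None:
        self.factory = factory
        self.open = True
        self.factory.add_connection(self)  # connectionMade

    def lose_connection(self) -> None:
        if self.open:
            self.open = False
            self.factory.remove_connection(self)  # connectionLost


class Site:
    def __init__(self) -> None:
        self.connections: List[Connection] = []

    def add_connection(self, conn: Connection) -> None:
        self.connections.append(conn)

    def remove_connection(self, conn: Connection) -> None:
        if conn in self.connections:
            self.connections.remove(conn)

    def stop_factory(self) -> None:
        # Close any long-lived connections that would otherwise keep the
        # server object alive after shutdown.
        for conn in self.connections[:]:
            conn.lose_connection()
        self.connections.clear()


site = Site()
c = Connection(site)
site.stop_factory()
assert not c.open and not site.connections
```

Iterating over a copy (`self.connections[:]`) matters because closing a connection mutates the list via the lost-connection callback.
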
@@ -704,6 +704,7 @@ class ThreadedFileSender:

    def __init__(self, hs: "HomeServer") -> None:
        self.reactor = hs.get_reactor()
        self.clock = hs.get_clock()
        self.thread_pool = hs.get_media_sender_thread_pool()

        self.file: Optional[BinaryIO] = None
@@ -712,7 +713,7 @@ class ThreadedFileSender:

        # Signals if the thread should keep reading/sending data. Set means
        # continue, clear means pause.
        self.wakeup_event = DeferredEvent(self.reactor)
        self.wakeup_event = DeferredEvent(self.clock)

        # Signals if the thread should terminate, e.g. because the consumer has
        # gone away.

@@ -67,7 +67,6 @@ from synapse.media.media_storage import (
from synapse.media.storage_provider import StorageProviderWrapper
from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
from synapse.media.url_previewer import UrlPreviewer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
from synapse.types import UserID
from synapse.util.async_helpers import Linearizer
@@ -187,16 +186,14 @@ class MediaRepository:
        self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository

    def _start_update_recently_accessed(self) -> Deferred:
        return run_as_background_process(
        return self.hs.run_as_background_process(
            "update_recently_accessed_media",
            self.server_name,
            self._update_recently_accessed,
        )

    def _start_apply_media_retention_rules(self) -> Deferred:
        return run_as_background_process(
        return self.hs.run_as_background_process(
            "apply_media_retention_rules",
            self.server_name,
            self._apply_media_retention_rules,
        )

@@ -44,7 +44,6 @@ from synapse.media._base import FileInfo, get_filename_from_headers
from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter
from synapse.media.oembed import OEmbedProvider
from synapse.media.preview_html import decode_body, parse_html_to_open_graph
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, UserID
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.expiringcache import ExpiringCache

@@ -167,6 +166,7 @@ class UrlPreviewer:
        media_storage: MediaStorage,
    ):
        self.clock = hs.get_clock()
        self.hs = hs
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.media.max_spider_size
        self.server_name = hs.hostname

@@ -201,15 +201,14 @@ class UrlPreviewer:
        self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
            cache_name="url_previews",
            server_name=self.server_name,
            hs=self.hs,
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=ONE_HOUR,
        )

        if self._worker_run_media_background_jobs:
            self._cleaner_loop = self.clock.looping_call(
                self._start_expire_url_cache_data, 10 * 1000
            )
            self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000)

    async def preview(self, url: str, user: UserID, ts: int) -> bytes:
        # the in-memory cache:

@@ -739,8 +738,8 @@ class UrlPreviewer:
        return open_graph_result, oembed_response.author_name, expiration_ms

    def _start_expire_url_cache_data(self) -> Deferred:
        return run_as_background_process(
            "expire_url_cache_data", self.server_name, self._expire_url_cache_data
        return self.hs.run_as_background_process(
            "expire_url_cache_data", self._expire_url_cache_data
        )

    async def _expire_url_cache_data(self) -> None:
@@ -138,7 +138,9 @@ def install_gc_manager() -> None:
            gc_time.labels(i).observe(end - start)
            gc_unreachable.labels(i).set(unreachable)

    gc_task = task.LoopingCall(_maybe_gc)
    # We can ignore the lint here since this looping call does not hold a `HomeServer`
    # reference, so it can be cleaned up by other means on shutdown.
    gc_task = task.LoopingCall(_maybe_gc)  # type: ignore[prefer-synapse-clock-looping-call]
    gc_task.start(0.1)
@@ -66,6 +66,8 @@ if TYPE_CHECKING:
    # Old versions don't have `LiteralString`
    from typing_extensions import LiteralString

    from synapse.server import HomeServer


logger = logging.getLogger(__name__)

@@ -397,11 +399,11 @@ def run_as_background_process(
P = ParamSpec("P")


class HasServerName(Protocol):
    server_name: str
class HasHomeServer(Protocol):
    hs: "HomeServer"
    """
    The homeserver name that this cache is associated with (used to label the metric)
    (`hs.hostname`).
    The homeserver that this cache is associated with (used to label the metric and
    track background processes for clean shutdown).
    """


@@ -431,27 +433,22 @@ def wrap_as_background_process(
    """

    def wrapper(
        func: Callable[Concatenate[HasServerName, P], Awaitable[Optional[R]]],
        func: Callable[Concatenate[HasHomeServer, P], Awaitable[Optional[R]]],
    ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
        @wraps(func)
        def wrapped_func(
            self: HasServerName, *args: P.args, **kwargs: P.kwargs
            self: HasHomeServer, *args: P.args, **kwargs: P.kwargs
        ) -> "defer.Deferred[Optional[R]]":
            assert self.server_name is not None, (
                "The `server_name` attribute must be set on the object where `@wrap_as_background_process` decorator is used."
            assert self.hs is not None, (
                "The `hs` attribute must be set on the object where the `@wrap_as_background_process` decorator is used."
            )

            return run_as_background_process(
            return self.hs.run_as_background_process(
                desc,
                self.server_name,
                func,
                self,
                *args,
                # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
                # Argument 4 to "run_as_background_process" has incompatible type
                # "**P.kwargs"; expected "bool"
                # See https://github.com/python/mypy/issues/8862
                **kwargs,  # type: ignore[arg-type]
                **kwargs,
            )

    # There are some shenanigans here, because we're decorating a method but
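A hedged sketch of the decorator's new contract: the host object must now expose an `hs` attribute (a HomeServer) instead of just `server_name`, so the wrapped process is tracked for clean shutdown. The class below is hypothetical, for illustration only.

```python
from synapse.metrics.background_process_metrics import wrap_as_background_process


class MediaRetention:  # hypothetical host class
    def __init__(self, hs) -> None:
        self.hs = hs  # nb: must be called `hs` for @wrap_as_background_process

    @wrap_as_background_process("apply_media_retention_rules")
    async def apply_rules(self) -> None:
        ...  # the wrapped coroutine runs as a tracked background process
```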
@@ -23,7 +23,6 @@ from typing import TYPE_CHECKING
import attr

from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process

if TYPE_CHECKING:
    from synapse.server import HomeServer

@@ -52,6 +51,7 @@ class CommonUsageMetricsManager:
        self.server_name = hs.hostname
        self._store = hs.get_datastores().main
        self._clock = hs.get_clock()
        self._hs = hs

    async def get_metrics(self) -> CommonUsageMetrics:
        """Get the CommonUsageMetrics object. If no collection has happened yet, do it

@@ -64,16 +64,14 @@ class CommonUsageMetricsManager:

    async def setup(self) -> None:
        """Keep the gauges for common usage metrics up to date."""
        run_as_background_process(
        self._hs.run_as_background_process(
            desc="common_usage_metrics_update_gauges",
            server_name=self.server_name,
            func=self._update_gauges,
        )
        self._clock.looping_call(
            run_as_background_process,
            self._hs.run_as_background_process,
            5 * 60 * 1000,
            desc="common_usage_metrics_update_gauges",
            server_name=self.server_name,
            func=self._update_gauges,
        )
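The resulting call shape, pulled out as a sketch (assuming `hs`, `clock`, and the update coroutine are in scope): the periodic refresh now goes through the HomeServer-tracked helper rather than the free function, so each run's Deferred is recorded and can be cancelled on shutdown.

```python
def schedule_gauge_updates(hs, clock, update_gauges) -> None:
    # Kick off one immediate update, then refresh every five minutes.
    hs.run_as_background_process(
        desc="common_usage_metrics_update_gauges", func=update_gauges
    )
    clock.looping_call(
        hs.run_as_background_process,
        5 * 60 * 1000,  # period in milliseconds
        desc="common_usage_metrics_update_gauges",
        func=update_gauges,
    )
```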
@@ -275,7 +275,15 @@ def run_as_background_process(
    # function instead.
    stub_server_name = "synapse_module_running_from_unknown_server"

    return _run_as_background_process(
    # Ignore the linter error here. Since this is leveraging the
    # `run_as_background_process` function directly and we don't want to break the
    # module api, we need to keep the function signature the same. This means we don't
    # have access to the running `HomeServer` and cannot track this background process
    # for cleanup during shutdown.
    # This is not an issue during runtime and is only potentially problematic if the
    # application cares about being able to garbage collect `HomeServer` instances
    # during runtime.
    return _run_as_background_process(  # type: ignore[untracked-background-process]
        desc,
        stub_server_name,
        func,

@@ -1402,7 +1410,7 @@ class ModuleApi:

        if self._hs.config.worker.run_background_tasks or run_on_all_instances:
            self._clock.looping_call(
                self.run_as_background_process,
                self._hs.run_as_background_process,
                msec,
                desc,
                lambda: maybe_awaitable(f(*args, **kwargs)),

@@ -1460,7 +1468,7 @@ class ModuleApi:
        return self._clock.call_later(
            # convert ms to seconds as needed by call_later.
            msec * 0.001,
            self.run_as_background_process,
            self._hs.run_as_background_process,
            desc,
            lambda: maybe_awaitable(f(*args, **kwargs)),
        )

@@ -1701,8 +1709,8 @@ class ModuleApi:
        Note that the returned Deferred does not follow the synapse logcontext
        rules.
        """
        return _run_as_background_process(
            desc, self.server_name, func, *args, bg_start_span=bg_start_span, **kwargs
        return self._hs.run_as_background_process(
            desc, func, *args, bg_start_span=bg_start_span, **kwargs
        )

    async def defer_to_thread(
@@ -676,9 +676,16 @@ class Notifier:
                # is a new token.
                listener = user_stream.new_listener(prev_token)
                listener = timeout_deferred(
                    listener,
                    (end_time - now) / 1000.0,
                    self.hs.get_reactor(),
                    deferred=listener,
                    timeout=(end_time - now) / 1000.0,
                    # We don't track these calls since they are constantly being
                    # overridden by new calls to /sync and they don't hold the
                    # `HomeServer` in memory on shutdown. It is safe to let them
                    # time out of their own accord after shutting down since it
                    # won't delay shutdown and there won't be any adverse
                    # behaviour.
                    cancel_on_shutdown=False,
                    clock=self.hs.get_clock(),
                )

                log_kv(
@@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.internet.interfaces import IDelayedCall

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams
from synapse.push.mailer import Mailer
from synapse.push.push_types import EmailReason

@@ -118,7 +117,7 @@ class EmailPusher(Pusher):
        if self._is_processing:
            return

        run_as_background_process("emailpush.process", self.server_name, self._process)
        self.hs.run_as_background_process("emailpush.process", self._process)

    def _pause_processing(self) -> None:
        """Used by tests to temporarily pause processing of events.

@@ -228,8 +227,10 @@ class EmailPusher(Pusher):
            self.timed_call = None

        if soonest_due_at is not None:
            self.timed_call = self.hs.get_reactor().callLater(
                self.seconds_until(soonest_due_at), self.on_timer
            delay = self.seconds_until(soonest_due_at)
            self.timed_call = self.hs.get_clock().call_later(
                delay,
                self.on_timer,
            )

    async def save_last_stream_ordering_and_success(
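Why the switch matters, as a sketch: `Clock.call_later` takes seconds like `reactor.callLater`, but on this branch the Clock is assumed to record the delayed call so `hs.shutdown()` can find and cancel it.

```python
def schedule_retry(hs, delay_seconds: float, on_timer) -> None:
    # Tracked: the Clock records this delayed call, so shutdown can cancel it.
    hs.get_clock().call_later(delay_seconds, on_timer)
    # Untracked variant being removed throughout this branch:
    # hs.get_reactor().callLater(delay_seconds, on_timer)
```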
@@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.logging import opentracing
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.storage.databases.main.event_push_actions import HttpPushAction
from synapse.types import JsonDict, JsonMapping

@@ -182,8 +181,8 @@ class HttpPusher(Pusher):

        # We could check the receipts are actually m.read receipts here,
        # but currently that's the only type of receipt anyway...
        run_as_background_process(
            "http_pusher.on_new_receipts", self.server_name, self._update_badge
        self.hs.run_as_background_process(
            "http_pusher.on_new_receipts", self._update_badge
        )

    async def _update_badge(self) -> None:

@@ -219,7 +218,7 @@ class HttpPusher(Pusher):
        if self.failing_since and self.timed_call and self.timed_call.active():
            return

        run_as_background_process("httppush.process", self.server_name, self._process)
        self.hs.run_as_background_process("httppush.process", self._process)

    async def _process(self) -> None:
        # we should never get here if we are already processing

@@ -336,8 +335,9 @@ class HttpPusher(Pusher):
                )
            else:
                logger.info("Push failed: delaying for %ds", self.backoff_delay)
                self.timed_call = self.hs.get_reactor().callLater(
                    self.backoff_delay, self.on_timer
                self.timed_call = self.hs.get_clock().call_later(
                    self.backoff_delay,
                    self.on_timer,
                )
                self.backoff_delay = min(
                    self.backoff_delay * 2, self.MAX_BACKOFF_SEC
@@ -27,7 +27,6 @@ from prometheus_client import Gauge
from synapse.api.errors import Codes, SynapseError
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.push import Pusher, PusherConfig, PusherConfigException

@@ -70,10 +69,8 @@ class PusherPool:
    """

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self.server_name = hs.hostname
        self.pusher_factory = PusherFactory(hs)
        self.store = self.hs.get_datastores().main
        self.clock = self.hs.get_clock()

@@ -112,9 +109,7 @@ class PusherPool:
        if not self._should_start_pushers:
            logger.info("Not starting pushers because they are disabled in the config")
            return
        run_as_background_process(
            "start_pushers", self.server_name, self._start_pushers
        )
        self.hs.run_as_background_process("start_pushers", self._start_pushers)

    async def add_or_update_pusher(
        self,
@@ -185,46 +185,6 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
        return 200, multi_user_devices


# FIXME(2025-07-22): Remove this on the next release, this will only get used
# during rollout to Synapse 1.135 and can be removed after that release.
class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
    """Unused endpoint, kept for backwards compatibility during rollout."""

    NAME = "upload_keys_for_user"
    PATH_ARGS = ()
    CACHE = False

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.e2e_keys_handler = hs.get_e2e_keys_handler()
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()

    @staticmethod
    async def _serialize_payload(  # type: ignore[override]
        user_id: str, device_id: str, keys: JsonDict
    ) -> JsonDict:
        return {
            "user_id": user_id,
            "device_id": device_id,
            "keys": keys,
        }

    async def _handle_request(  # type: ignore[override]
        self, request: Request, content: JsonDict
    ) -> Tuple[int, JsonDict]:
        user_id = content["user_id"]
        device_id = content["device_id"]
        keys = content["keys"]

        results = await self.e2e_keys_handler.upload_keys_for_user(
            user_id, device_id, keys
        )

        return 200, results


class ReplicationHandleNewDeviceUpdateRestServlet(ReplicationEndpoint):
    """Wake up a device writer to send local device list changes as federation outbound pokes.

@@ -291,5 +251,4 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    ReplicationNotifyUserSignatureUpdateRestServlet(hs).register(http_server)
    ReplicationMultiUserDevicesResyncRestServlet(hs).register(http_server)
    ReplicationHandleNewDeviceUpdateRestServlet(hs).register(http_server)
    ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
    ReplicationDeviceHandleRoomUnPartialStated(hs).register(http_server)
@@ -32,7 +32,6 @@ from synapse.api.constants import EventTypes, Membership, ReceiptTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.tcp.streams import (
    AccountDataStream,
    DeviceListsStream,

@@ -344,7 +343,9 @@ class ReplicationDataHandler:
        # to wedge here forever.
        deferred: "Deferred[None]" = Deferred()
        deferred = timeout_deferred(
            deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor
            deferred=deferred,
            timeout=_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS,
            clock=self._clock,
        )

        waiting_list = self._streams_to_waiters.setdefault(

@@ -513,8 +514,8 @@ class FederationSenderHandler:
            # no need to queue up another task.
            return

        run_as_background_process(
            "_save_and_send_ack", self.server_name, self._save_and_send_ack
        self._hs.run_as_background_process(
            "_save_and_send_ack", self._save_and_send_ack
        )

    async def _save_and_send_ack(self) -> None:
@@ -41,7 +41,6 @@ from prometheus_client import Counter
from twisted.internet.protocol import ReconnectingClientFactory

from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.tcp.commands import (
    ClearUserSyncsCommand,
    Command,

@@ -132,6 +131,7 @@ class ReplicationCommandHandler:

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.hs = hs
        self._replication_data_handler = hs.get_replication_data_handler()
        self._presence_handler = hs.get_presence_handler()
        self._store = hs.get_datastores().main

@@ -361,9 +361,8 @@ class ReplicationCommandHandler:
            return

        # fire off a background process to start processing the queue.
        run_as_background_process(
        self.hs.run_as_background_process(
            "process-replication-data",
            self.server_name,
            self._unsafe_process_queue,
            stream_name,
        )
@@ -42,7 +42,6 @@ from synapse.logging.context import PreserveLoggingContext
from synapse.metrics import SERVER_NAME_LABEL, LaterGauge
from synapse.metrics.background_process_metrics import (
    BackgroundProcessLoggingContext,
    run_as_background_process,
)
from synapse.replication.tcp.commands import (
    VALID_CLIENT_COMMANDS,

@@ -140,9 +139,14 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
    max_line_buffer = 10000

    def __init__(
        self, server_name: str, clock: Clock, handler: "ReplicationCommandHandler"
        self,
        hs: "HomeServer",
        server_name: str,
        clock: Clock,
        handler: "ReplicationCommandHandler",
    ):
        self.server_name = server_name
        self.hs = hs
        self.clock = clock
        self.command_handler = handler

@@ -290,9 +294,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
        # if so.

        if isawaitable(res):
            run_as_background_process(
            self.hs.run_as_background_process(
                "replication-" + cmd.get_logcontext_id(),
                self.server_name,
                lambda: res,
            )

@@ -470,9 +473,13 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
    VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS

    def __init__(
        self, server_name: str, clock: Clock, handler: "ReplicationCommandHandler"
        self,
        hs: "HomeServer",
        server_name: str,
        clock: Clock,
        handler: "ReplicationCommandHandler",
    ):
        super().__init__(server_name, clock, handler)
        super().__init__(hs, server_name, clock, handler)

        self.server_name = server_name

@@ -497,7 +504,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
        clock: Clock,
        command_handler: "ReplicationCommandHandler",
    ):
        super().__init__(server_name, clock, command_handler)
        super().__init__(hs, server_name, clock, command_handler)

        self.client_name = client_name
        self.server_name = server_name
@@ -40,7 +40,6 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import (
    BackgroundProcessLoggingContext,
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.tcp.commands import (

@@ -109,6 +108,7 @@ class RedisSubscriber(SubscriberProtocol):
    """

    server_name: str
    hs: "HomeServer"
    synapse_handler: "ReplicationCommandHandler"
    synapse_stream_prefix: str
    synapse_channel_names: List[str]

@@ -146,9 +146,7 @@ class RedisSubscriber(SubscriberProtocol):
    def connectionMade(self) -> None:
        logger.info("Connected to redis")
        super().connectionMade()
        run_as_background_process(
            "subscribe-replication", self.server_name, self._send_subscribe
        )
        self.hs.run_as_background_process("subscribe-replication", self._send_subscribe)

    async def _send_subscribe(self) -> None:
        # it's important to make sure that we only send the REPLICATE command once we

@@ -223,8 +221,8 @@ class RedisSubscriber(SubscriberProtocol):
        # if so.

        if isawaitable(res):
            run_as_background_process(
                "replication-" + cmd.get_logcontext_id(), self.server_name, lambda: res
            self.hs.run_as_background_process(
                "replication-" + cmd.get_logcontext_id(), lambda: res
            )

    def connectionLost(self, reason: Failure) -> None:  # type: ignore[override]

@@ -245,9 +243,8 @@ class RedisSubscriber(SubscriberProtocol):
        Args:
            cmd: The command to send
        """
        run_as_background_process(
        self.hs.run_as_background_process(
            "send-cmd",
            self.server_name,
            self._async_send_command,
            cmd,
            # We originally started tracing background processes to avoid `There was no

@@ -317,9 +314,8 @@ class SynapseRedisFactory(RedisFactory):
            convertNumbers=convertNumbers,
        )

        self.server_name = (
            hs.hostname
        )  # nb must be called this for @wrap_as_background_process
        self.hs = hs  # nb must be called this for @wrap_as_background_process
        self.server_name = hs.hostname

        hs.get_clock().looping_call(self._send_ping, 30 * 1000)

@@ -397,6 +393,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
        )

        self.server_name = hs.hostname
        self.hs = hs
        self.synapse_handler = hs.get_replication_command_handler()
        self.synapse_stream_prefix = hs.hostname
        self.synapse_channel_names = channel_names

@@ -412,6 +409,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
        # the base method does some other things than just instantiating the
        # protocol.
        p.server_name = self.server_name
        p.hs = self.hs
        p.synapse_handler = self.synapse_handler
        p.synapse_outbound_redis_connection = self.synapse_outbound_redis_connection
        p.synapse_stream_prefix = self.synapse_stream_prefix
@@ -30,7 +30,6 @@ from twisted.internet.interfaces import IAddress
from twisted.internet.protocol import ServerFactory

from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.tcp.commands import PositionCommand
from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol
from synapse.replication.tcp.streams import EventsStream

@@ -55,6 +54,7 @@ class ReplicationStreamProtocolFactory(ServerFactory):
    def __init__(self, hs: "HomeServer"):
        self.command_handler = hs.get_replication_command_handler()
        self.clock = hs.get_clock()
        self.hs = hs
        self.server_name = hs.config.server.server_name

        # If we've created a `ReplicationStreamProtocolFactory` then we're

@@ -69,7 +69,7 @@ class ReplicationStreamProtocolFactory(ServerFactory):

    def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol:
        return ServerReplicationStreamProtocol(
            self.server_name, self.clock, self.command_handler
            self.hs, self.server_name, self.clock, self.command_handler
        )


@@ -82,6 +82,7 @@ class ReplicationStreamer:

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.hs = hs
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

@@ -147,8 +148,8 @@ class ReplicationStreamer:
            logger.debug("Notifier poke loop already running")
            return

        run_as_background_process(
            "replication_notifier", self.server_name, self._run_notifier_loop
        self.hs.run_as_background_process(
            "replication_notifier", self._run_notifier_loop
        )

    async def _run_notifier_loop(self) -> None:
@@ -77,6 +77,7 @@ STREAMS_MAP = {
__all__ = [
    "STREAMS_MAP",
    "Stream",
    "EventsStream",
    "BackfillStream",
    "PresenceStream",
    "PresenceFederationStream",

@@ -87,6 +88,7 @@ __all__ = [
    "CachesStream",
    "DeviceListsStream",
    "ToDeviceStream",
    "FederationStream",
    "AccountDataStream",
    "ThreadSubscriptionsStream",
    "UnPartialStatedRoomStream",
@@ -23,10 +23,19 @@
import logging
import re
from collections import Counter
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union

from typing_extensions import Self

from synapse._pydantic_compat import (
    StrictBool,
    StrictStr,
    validator,
)
from synapse.api.auth.mas import MasDelegatedAuth
from synapse.api.errors import (
    Codes,
    InteractiveAuthIncompleteError,
    InvalidAPICallError,
    SynapseError,

@@ -37,11 +46,13 @@ from synapse.http.servlet import (
    parse_integer,
    parse_json_object_from_request,
    parse_string,
    validate_json_object,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import log_kv, set_tag
from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken
from synapse.types.rest import RequestBodyModel
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:

@@ -59,7 +70,6 @@ class KeyUploadServlet(RestServlet):
        "device_keys": {
            "user_id": "<user_id>",
            "device_id": "<device_id>",
            "valid_until_ts": <millisecond_timestamp>,
            "algorithms": [
                "m.olm.curve25519-aes-sha2",
            ]

@@ -111,12 +121,123 @@ class KeyUploadServlet(RestServlet):
        self._clock = hs.get_clock()
        self._store = hs.get_datastores().main

    class KeyUploadRequestBody(RequestBodyModel):
        """
        The body of a `POST /_matrix/client/v3/keys/upload` request.

        Based on https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3keysupload.
        """

        class DeviceKeys(RequestBodyModel):
            algorithms: List[StrictStr]
            """The encryption algorithms supported by this device."""

            device_id: StrictStr
            """The ID of the device these keys belong to. Must match the device ID used when logging in."""

            keys: Mapping[StrictStr, StrictStr]
            """
            Public identity keys. The names of the properties should be in the
            format `<algorithm>:<device_id>`. The keys themselves should be encoded as
            specified by the key algorithm.
            """

            signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]]
            """Signatures for the device key object. A map from user ID, to a map from "<algorithm>:<device_id>" to the signature."""

            user_id: StrictStr
            """The ID of the user the device belongs to. Must match the user ID used when logging in."""

        class KeyObject(RequestBodyModel):
            key: StrictStr
            """The key, encoded using unpadded base64."""

            fallback: Optional[StrictBool] = False
            """Whether this is a fallback key. Only used when handling fallback keys."""

            signatures: Mapping[StrictStr, Mapping[StrictStr, StrictStr]]
            """Signature for the device. Mapped from user ID to another map of key signing identifier to the signature itself.

            See the following for more detail: https://spec.matrix.org/v1.16/appendices/#signing-details
            """

        device_keys: Optional[DeviceKeys] = None
        """Identity keys for the device. May be absent if no new identity keys are required."""

        fallback_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]]
        """
        The public key which should be used if the device's one-time keys are
        exhausted. The fallback key is not deleted once used, but should be
        replaced when additional one-time keys are being uploaded. The server
        will notify the client of the fallback key being used through `/sync`.

        There can only be at most one key per algorithm uploaded, and the server
        will only persist one key per algorithm.

        When uploading a signed key, an additional `fallback: true` key should be
        included to denote that the key is a fallback key.

        May be absent if a new fallback key is not required.
        """

        @validator("fallback_keys", pre=True)
        def validate_fallback_keys(cls: Self, v: Any) -> Any:
            if v is None:
                return v
            if not isinstance(v, dict):
                raise TypeError("fallback_keys must be a mapping")

            for k, _ in v.items():
                if not len(k.split(":")) == 2:
                    raise SynapseError(
                        code=HTTPStatus.BAD_REQUEST,
                        errcode=Codes.BAD_JSON,
                        msg=f"Invalid fallback_keys key {k!r}. "
                        'Expected "<algorithm>:<device_id>".',
                    )
            return v

        one_time_keys: Optional[Mapping[StrictStr, Union[StrictStr, KeyObject]]] = None
        """
        One-time public keys for “pre-key” messages. The names of the properties
        should be in the format `<algorithm>:<key_id>`.

        The format of the key is determined by the key algorithm, see:
        https://spec.matrix.org/v1.16/client-server-api/#key-algorithms.
        """

        @validator("one_time_keys", pre=True)
        def validate_one_time_keys(cls: Self, v: Any) -> Any:
            if v is None:
                return v
            if not isinstance(v, dict):
                raise TypeError("one_time_keys must be a mapping")

            for k, _ in v.items():
                if not len(k.split(":")) == 2:
                    raise SynapseError(
                        code=HTTPStatus.BAD_REQUEST,
                        errcode=Codes.BAD_JSON,
                        msg=f"Invalid one_time_keys key {k!r}. "
                        'Expected "<algorithm>:<device_id>".',
                    )
            return v

    async def on_POST(
        self, request: SynapseRequest, device_id: Optional[str]
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()

        # Parse the request body. Validate separately, as the handler expects a
        # plain dict, rather than any parsed object.
        #
        # Note: It would be nice to work with a parsed object, but the handler
        # needs to encode portions of the request body as canonical JSON before
        # storing the result in the DB. There's little point in converting to a
        # parsed object and then back to a dict.
        body = parse_json_object_from_request(request)
        validate_json_object(body, self.KeyUploadRequestBody)

        if device_id is not None:
            # Providing the device_id should only be done for setting keys

@@ -149,8 +270,31 @@ class KeyUploadServlet(RestServlet):
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if "device_keys" in body:
            # Validate the provided `user_id` and `device_id` fields in
            # `device_keys` match that of the requesting user. We can't do
            # this directly in the pydantic model as we don't have access
            # to the requester yet.
            #
            # TODO: We could use ValidationInfo when we switch to Pydantic v2.
            # https://docs.pydantic.dev/latest/concepts/validators/#validation-info
            if body["device_keys"]["user_id"] != user_id:
                raise SynapseError(
                    code=HTTPStatus.BAD_REQUEST,
                    errcode=Codes.BAD_JSON,
                    msg="Provided `user_id` in `device_keys` does not match that of the authenticated user",
                )
            if body["device_keys"]["device_id"] != device_id:
                raise SynapseError(
                    code=HTTPStatus.BAD_REQUEST,
                    errcode=Codes.BAD_JSON,
                    msg="Provided `device_id` in `device_keys` does not match that of the authenticated user device",
                )

        result = await self.e2e_keys_handler.upload_keys_for_user(
            user_id=user_id, device_id=device_id, keys=body
            user_id=user_id,
            device_id=device_id,
            keys=body,
        )

        return 200, result
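A hedged illustration of what the new model enforces (names taken from the diff above; `parse_obj` being the pydantic-v1 style entry point reachable through synapse's compat layer is an assumption):

```python
ok = {
    "one_time_keys": {
        "signed_curve25519:AAAAHQ": {"key": "zKbLg...", "signatures": {}},
    }
}
KeyUploadServlet.KeyUploadRequestBody.parse_obj(ok)  # accepted

bad = {"one_time_keys": {"missing_algorithm_separator": "zKbLg..."}}
# The pre-validator splits each key ID on ":" and rejects anything that is
# not "<algorithm>:<device_id>", raising SynapseError(400, M_BAD_JSON)
# before the handler ever runs.
KeyUploadServlet.KeyUploadRequestBody.parse_obj(bad)
```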
@@ -66,7 +66,6 @@ from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.state import CREATE_KEY, POWER_KEY

@@ -1225,6 +1224,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.server_name = hs.hostname
        self.hs = hs
        self.event_creation_handler = hs.get_event_creation_handler()
        self.auth = hs.get_auth()
        self._store = hs.get_datastores().main

@@ -1307,9 +1307,8 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
            )

        if with_relations:
            run_as_background_process(
            self.hs.run_as_background_process(
                "redact_related_events",
                self.server_name,
                self._relation_handler.redact_events_related_to,
                requester=requester,
                event_id=event_id,
@@ -126,6 +126,7 @@ class SyncRestServlet(RestServlet):

        self._json_filter_cache: LruCache[str, bool] = LruCache(
            max_size=1000,
            clock=self.clock,
            cache_name="sync_valid_filter",
            server_name=self.server_name,
        )

@@ -363,9 +364,6 @@ class SyncRestServlet(RestServlet):

        # https://github.com/matrix-org/matrix-doc/blob/54255851f642f84a4f1aaf7bc063eebe3d76752b/proposals/2732-olm-fallback-keys.md
        # states that this field should always be included, as long as the server supports the feature.
        response["org.matrix.msc2732.device_unused_fallback_key_types"] = (
            sync_result.device_unused_fallback_key_types
        )
        response["device_unused_fallback_key_types"] = (
            sync_result.device_unused_fallback_key_types
        )
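Illustrative effect on the /sync response body (the algorithm name is assumed): only the stable field remains once the unstable MSC2732 one is dropped.

```python
response = {
    # "org.matrix.msc2732.device_unused_fallback_key_types": [...]  # no longer sent
    "device_unused_fallback_key_types": ["signed_curve25519"],
}
```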
@@ -56,7 +56,7 @@ class HttpTransactionCache:
        ] = {}
        # Try to clean entries every 30 mins. This means entries will exist
        # for at *LEAST* 30 mins, and at *MOST* 60 mins.
        self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
        self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)

    def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable:
        """A helper function which returns a transaction key that can be used
@@ -28,10 +28,27 @@
import abc
import functools
import logging
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, TypeVar, cast
from threading import Thread
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
    cast,
)
from wsgiref.simple_server import WSGIServer

from attr import dataclass
from typing_extensions import TypeAlias

from twisted.internet import defer
from twisted.internet.base import _SystemEventID
from twisted.internet.interfaces import IOpenSSLContextFactory
from twisted.internet.tcp import Port
from twisted.python.threadpool import ThreadPool

@@ -44,6 +61,7 @@ from synapse.api.auth.mas import MasDelegatedAuth
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.filtering import Filtering
from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter
from synapse.app._base import unregister_sighups
from synapse.appservice.api import ApplicationServiceApi
from synapse.appservice.scheduler import ApplicationServiceScheduler
from synapse.config.homeserver import HomeServerConfig

@@ -133,6 +151,7 @@ from synapse.metrics import (
    all_later_gauges_to_clean_up_on_shutdown,
    register_threadpool,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
from synapse.module_api.callbacks import ModuleApiCallbacks

@@ -156,6 +175,7 @@ from synapse.storage.controllers import StorageControllers
from synapse.streams.events import EventSources
from synapse.synapse_rust.rendezvous import RendezvousHandler
from synapse.types import DomainSpecificString, ISynapseReactor
from synapse.util.caches import CACHE_METRIC_REGISTRY
from synapse.util.clock import Clock
from synapse.util.distributor import Distributor
from synapse.util.macaroons import MacaroonGenerator

@@ -166,7 +186,9 @@ from synapse.util.task_scheduler import TaskScheduler
logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    # Old Python versions don't have `LiteralString`
    from txredisapi import ConnectionHandler
    from typing_extensions import LiteralString

    from synapse.handlers.jwt import JwtHandler
    from synapse.handlers.oidc import OidcHandler

@@ -196,6 +218,7 @@ if TYPE_CHECKING:

T: TypeAlias = object
F = TypeVar("F", bound=Callable[["HomeServer"], T])
R = TypeVar("R")


def cache_in_self(builder: F) -> F:

@@ -219,7 +242,8 @@ def cache_in_self(builder: F) -> F:
    @functools.wraps(builder)
    def _get(self: "HomeServer") -> T:
        try:
            return getattr(self, depname)
            dep = getattr(self, depname)
            return dep
        except AttributeError:
            pass
@@ -239,6 +263,22 @@ def cache_in_self(builder: F) -> F:
    return cast(F, _get)


@dataclass
class ShutdownInfo:
    """Information for callable functions called at time of shutdown.

    Attributes:
        func: the object to call before shutdown.
        trigger_id: an ID returned when registering this event trigger.
        kwargs: the keyword arguments to call the function with.
    """

    func: Callable[..., Any]
    trigger_id: _SystemEventID
    kwargs: Dict[str, object]


class HomeServer(metaclass=abc.ABCMeta):
    """A basic homeserver object without lazy component builders.

@@ -289,6 +329,7 @@ class HomeServer(metaclass=abc.ABCMeta):
            hostname : The hostname for the server.
            config: The full config for the homeserver.
        """

        if not reactor:
            from twisted.internet import reactor as _reactor

@@ -300,6 +341,7 @@ class HomeServer(metaclass=abc.ABCMeta):
        self.signing_key = config.key.signing_key[0]
        self.config = config
        self._listening_services: List[Port] = []
        self._metrics_listeners: List[Tuple[WSGIServer, Thread]] = []
        self.start_time: Optional[int] = None

        self._instance_id = random_string(5)

@@ -315,6 +357,211 @@ class HomeServer(metaclass=abc.ABCMeta):
        # This attribute is set by the free function `refresh_certificate`.
        self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None

        self._is_shutdown = False
        self._async_shutdown_handlers: List[ShutdownInfo] = []
        self._sync_shutdown_handlers: List[ShutdownInfo] = []
        self._background_processes: set[defer.Deferred[Optional[Any]]] = set()

    def run_as_background_process(
        self,
        desc: "LiteralString",
        func: Callable[..., Awaitable[Optional[R]]],
        *args: Any,
        **kwargs: Any,
    ) -> "defer.Deferred[Optional[R]]":
        """Run the given function in its own logcontext, with resource metrics

        This should be used to wrap processes which are fired off to run in the
        background, instead of being associated with a particular request.

        It returns a Deferred which completes when the function completes, but it doesn't
        follow the synapse logcontext rules, which makes it appropriate for passing to
        clock.looping_call and friends (or for firing-and-forgetting in the middle of a
        normal synapse async function).

        Because the returned Deferred does not follow the synapse logcontext rules, awaiting
        the result of this function will result in the log context being cleared (bad). In
        order to properly await the result of this function and maintain the current log
        context, use `make_deferred_yieldable`.

        Args:
            desc: a description for this background process type
            func: a function, which may return a Deferred or a coroutine
            bg_start_span: Whether to start an opentracing span. Defaults to True.
                Should only be disabled for processes that will not log to or tag
                a span.
            args: positional args for func
            kwargs: keyword args for func

        Returns:
            Deferred which returns the result of func, or `None` if func raises.
            Note that the returned Deferred does not follow the synapse logcontext
            rules.
        """
        if self._is_shutdown:
            raise Exception(
                f"Cannot start background process. HomeServer has been shutdown {len(self._background_processes)} {len(self.get_clock()._looping_calls)} {len(self.get_clock()._call_id_to_delayed_call)}"
            )

        # Ignore linter error as this is the one location this should be called.
        deferred = run_as_background_process(desc, self.hostname, func, *args, **kwargs)  # type: ignore[untracked-background-process]
        self._background_processes.add(deferred)

        def on_done(res: R) -> R:
            try:
                self._background_processes.remove(deferred)
            except KeyError:
                # If the background process isn't being tracked anymore we can just move on.
                pass
            return res

        deferred.addBoth(on_done)
        return deferred
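A usage sketch following the docstring's guidance (`do_work` is a hypothetical coroutine; the logcontext helper is synapse's own):

```python
from synapse.logging.context import make_deferred_yieldable


async def kick_off(hs) -> None:
    d = hs.run_as_background_process("do_work", do_work)  # fire-and-forget is fine
    # To actually await the result without clearing the current logcontext,
    # wrap the Deferred as the docstring advises:
    result = await make_deferred_yieldable(d)
```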
    async def shutdown(self) -> None:
        """
        Cleanly stops all aspects of the HomeServer and removes any references that
        have been handed out in order to allow the HomeServer object to be garbage
        collected.

        You must ensure the HomeServer object is not frozen in the garbage collector
        in order for it to be cleaned up. By default, Synapse freezes the HomeServer
        object in the garbage collector.
        """

        self._is_shutdown = True

        logger.info(
            "Received shutdown request for %s (%s).",
            self.hostname,
            self.get_instance_id(),
        )

        # Unregister sighups first. If a shutdown was requested we shouldn't be responding
        # to things like config changes. So it would be best to stop listening to these first.
        unregister_sighups(self._instance_id)

        # TODO: It would be desirable to be able to report an error if the HomeServer
        # object is frozen in the garbage collector as that would prevent it from being
        # collected after being shut down.
        # In theory the following should work, but it doesn't seem to make a difference
        # when I test it locally.
        #
        # if gc.is_tracked(self):
        #     logger.error("HomeServer object is tracked by garbage collection so cannot be fully cleaned up")

        for listener in self._listening_services:
            # During unit tests, an incomplete `twisted.pair.testing._FakePort` is used
            # for listeners so check listener type here to ensure shutdown procedure is
            # only applied to actual `Port` instances.
            if type(listener) is Port:
                port_shutdown = listener.stopListening()
                if port_shutdown is not None:
                    await port_shutdown
        self._listening_services.clear()

        for server, thread in self._metrics_listeners:
            server.shutdown()
            thread.join()
        self._metrics_listeners.clear()

        # TODO: Cleanup replication pieces

        self.get_keyring().shutdown()

        # Cleanup metrics associated with the homeserver
        for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
            later_gauge.unregister_hooks_for_homeserver_instance_id(
                self.get_instance_id()
            )

        CACHE_METRIC_REGISTRY.unregister_hooks_for_homeserver(
            self.config.server.server_name
        )

        for db in self.get_datastores().databases:
            db.stop_background_updates()

        if self.should_send_federation():
            try:
                self.get_federation_sender().shutdown()
            except Exception:
                pass

        for shutdown_handler in self._async_shutdown_handlers:
            try:
                self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id)
                defer.ensureDeferred(shutdown_handler.func(**shutdown_handler.kwargs))
            except Exception as e:
                logger.error("Error calling shutdown async handler: %s", e)
        self._async_shutdown_handlers.clear()

        for shutdown_handler in self._sync_shutdown_handlers:
            try:
                self.get_reactor().removeSystemEventTrigger(shutdown_handler.trigger_id)
                shutdown_handler.func(**shutdown_handler.kwargs)
            except Exception as e:
                logger.error("Error calling shutdown sync handler: %s", e)
        self._sync_shutdown_handlers.clear()

        self.get_clock().shutdown()

        for background_process in list(self._background_processes):
            try:
                background_process.cancel()
            except Exception:
                pass
        self._background_processes.clear()

        for db in self.get_datastores().databases:
            db._db_pool.close()

    def register_async_shutdown_handler(
        self,
        *,
        phase: str,
        eventType: str,
        shutdown_func: Callable[..., Any],
        **kwargs: object,
    ) -> None:
        """
        Register a system event trigger with the HomeServer so it can be cleanly
        removed when the HomeServer is shut down.
        """
        id = self.get_clock().add_system_event_trigger(
            phase,
            eventType,
            shutdown_func,
            **kwargs,
        )
        self._async_shutdown_handlers.append(
            ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
        )

    def register_sync_shutdown_handler(
        self,
        *,
        phase: str,
        eventType: str,
        shutdown_func: Callable[..., Any],
        **kwargs: object,
    ) -> None:
        """
        Register a system event trigger with the HomeServer so it can be cleanly
        removed when the HomeServer is shut down.
        """
        id = self.get_clock().add_system_event_trigger(
            phase,
            eventType,
            shutdown_func,
            **kwargs,
        )
        self._sync_shutdown_handlers.append(
            ShutdownInfo(func=shutdown_func, trigger_id=id, kwargs=kwargs)
        )

    def register_module_web_resource(self, path: str, resource: Resource) -> None:
        """Allows a module to register a web resource to be served at the given path.
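Usage sketch of the new helper (mirroring the media threadpool hunk further below): the trigger ID returned by the Clock is recorded in a `ShutdownInfo`, so `shutdown()` can both unregister the reactor trigger and invoke the handler exactly once.

```python
hs.register_sync_shutdown_handler(
    phase="during",
    eventType="shutdown",
    shutdown_func=media_threadpool.stop,  # `media_threadpool` assumed in scope
)
```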
@@ -366,36 +613,25 @@ class HomeServer(metaclass=abc.ABCMeta):
        self.datastores = Databases(self.DATASTORE_CLASS, self)
        logger.info("Finished setting up.")

    def __del__(self) -> None:
        """
        Called when the homeserver is garbage collected.
        # Register background tasks required by this server. This must be done
        # somewhat manually due to the background tasks not being registered
        # unless handlers are instantiated.
        if self.config.worker.run_background_tasks:
            self.start_background_tasks()

        Make sure we actually do some clean-up, rather than leak data.
        """
        self.cleanup()

    def cleanup(self) -> None:
        """
        WIP: Clean-up any references to the homeserver and stop any running related
        processes, timers, loops, replication stream, etc.

        This should be called wherever you care about the HomeServer being completely
        garbage collected like in tests. It's not necessary to call if you plan to just
        shut down the whole Python process anyway.

        Can be called multiple times.
        """
        logger.info("Received cleanup request for %s.", self.hostname)

        # TODO: Stop background processes, timers, loops, replication stream, etc.

        # Cleanup metrics associated with the homeserver
        for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
            later_gauge.unregister_hooks_for_homeserver_instance_id(
                self.get_instance_id()
            )

        logger.info("Cleanup complete for %s.", self.hostname)
    # def __del__(self) -> None:
    #     """
    #     Called when the homeserver is garbage collected.
    #
    #     Make sure we actually do some clean-up, rather than leak data.
    #     """
    #
    #     # NOTE: This is a chicken and egg problem.
    #     # __del__ will never be called since the HomeServer cannot be garbage collected
    #     # until the shutdown function has been called. So it makes no sense to call
    #     # shutdown inside of __del__, even though that is a logical place to assume it
    #     # should be called.
    #     self.shutdown()

    def start_listening(self) -> None:  # noqa: B027 (no-op by design)
        """Start the HTTP, manhole, metrics, etc listeners

@@ -442,7 +678,8 @@ class HomeServer(metaclass=abc.ABCMeta):

    @cache_in_self
    def get_clock(self) -> Clock:
        return Clock(self._reactor, server_name=self.hostname)
        # Ignore the linter error since this is the one place the `Clock` should be created.
        return Clock(self._reactor, server_name=self.hostname)  # type: ignore[multiple-internal-clocks]

    def get_datastores(self) -> Databases:
        if not self.datastores:

@@ -452,7 +689,7 @@ class HomeServer(metaclass=abc.ABCMeta):

    @cache_in_self
    def get_distributor(self) -> Distributor:
        return Distributor(server_name=self.hostname)
        return Distributor(hs=self)

    @cache_in_self
    def get_registration_ratelimiter(self) -> Ratelimiter:

@@ -1007,8 +1244,10 @@ class HomeServer(metaclass=abc.ABCMeta):
        )

        media_threadpool.start()
        self.get_clock().add_system_event_trigger(
            "during", "shutdown", media_threadpool.stop
        self.register_sync_shutdown_handler(
            phase="during",
            eventType="shutdown",
            shutdown_func=media_threadpool.stop,
        )

        # Register the threadpool with our metrics.
@@ -36,6 +36,7 @@ SERVER_NOTICE_ROOM_TAG = "m.server_notice"
class ServerNoticesManager:
    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname  # nb must be called this for @cached
        self.clock = hs.get_clock()  # nb must be called this for @cached
        self._store = hs.get_datastores().main
        self._config = hs.config
        self._account_data_handler = hs.get_account_data_handler()
@@ -651,6 +651,7 @@ class StateResolutionHandler:
            ExpiringCache(
                cache_name="state_cache",
                server_name=self.server_name,
                hs=hs,
                clock=self.clock,
                max_len=100000,
                expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000,
@@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta):
    ):
        self.hs = hs
        self.server_name = hs.hostname  # nb must be called this for @cached
        self._clock = hs.get_clock()
        self.clock = hs.get_clock()  # nb must be called this for @cached
        self.database_engine = database.engine
        self.db_pool = database
@@ -41,7 +41,6 @@ from typing import (
import attr

from synapse._pydantic_compat import BaseModel
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Connection, Cursor
from synapse.types import JsonDict, StrCollection

@@ -285,6 +284,13 @@ class BackgroundUpdater:
        self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms
        self.sleep_enabled = hs.config.background_updates.sleep_enabled

    def shutdown(self) -> None:
        """
        Stop any further background updates from happening.
        """
        self.enabled = False
        self._background_update_handlers.clear()

    def get_status(self) -> UpdaterStatus:
        """An integer summarising the updater status. Used as a metric."""
        if self._aborted:

@@ -396,9 +402,8 @@ class BackgroundUpdater:
        # if we start a new background update, not all updates are done.
        self._all_done = False
        sleep = self.sleep_enabled
        run_as_background_process(
        self.hs.run_as_background_process(
            "background_updates",
            self.server_name,
            self.run_background_updates,
            sleep,
        )
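How the new hook is assumed to be driven at shutdown (see the `HomeServer.shutdown` hunk above; the delegation from `stop_background_updates` to `BackgroundUpdater.shutdown` is inferred from context):

```python
def stop_all_background_updates(hs) -> None:
    for db in hs.get_datastores().databases:
        # Assumed to call through to BackgroundUpdater.shutdown(), which sets
        # enabled = False and clears the registered update handlers.
        db.stop_background_updates()
```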
@@ -62,7 +62,6 @@ from synapse.logging.opentracing import (
    trace,
)
from synapse.metrics import SERVER_NAME_LABEL
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.controllers.state import StateStorageController
from synapse.storage.databases import Databases
from synapse.storage.databases.main.events import DeltaState

@@ -195,6 +194,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):

    def __init__(
        self,
        hs: "HomeServer",
        server_name: str,
        per_item_callback: Callable[
            [str, _EventPersistQueueTask],

@@ -207,6 +207,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
        and its result will be returned via the Deferreds returned from add_to_queue.
        """
        self.server_name = server_name
        self.hs = hs
        self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {}
        self._currently_persisting_rooms: Set[str] = set()
        self._per_item_callback = per_item_callback

@@ -311,7 +312,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
            self._currently_persisting_rooms.discard(room_id)

        # set handle_queue_loop off in the background
        run_as_background_process("persist_events", self.server_name, handle_queue_loop)
        self.hs.run_as_background_process("persist_events", handle_queue_loop)

    def _get_drainining_queue(
        self, room_id: str

@@ -354,7 +355,7 @@ class EventsPersistenceStorageController:
        self._instance_name = hs.get_instance_name()
        self.is_mine_id = hs.is_mine_id
        self._event_persist_queue = _EventPeristenceQueue(
            self.server_name, self._process_event_persist_queue_task
            hs, self.server_name, self._process_event_persist_queue_task
        )
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self._state_controller = state_controller
@@ -46,9 +46,8 @@ class PurgeEventsStorageController:
"""High level interface for purging rooms and event history."""

def __init__(self, hs: "HomeServer", stores: Databases):
self.server_name = (
hs.hostname
) # nb must be called this for @wrap_as_background_process
self.hs = hs # nb must be called this for @wrap_as_background_process
self.server_name = hs.hostname
self.stores = stores

if hs.config.worker.run_background_tasks:

@@ -69,8 +69,8 @@ class StateStorageController:

def __init__(self, hs: "HomeServer", stores: "Databases"):
self.server_name = hs.hostname # nb must be called this for @cached
self.clock = hs.get_clock()
self._is_mine_id = hs.is_mine_id
self._clock = hs.get_clock()
self.stores = stores
self._partial_state_events_tracker = PartialStateEventsTracker(stores.main)
self._partial_state_room_tracker = PartialCurrentStateTracker(stores.main)
@@ -78,7 +78,7 @@ class StateStorageController:
# Used by `_get_joined_hosts` to ensure only one thing mutates the cache
# at a time. Keyed by room_id.
self._joined_host_linearizer = Linearizer(
name="_JoinedHostsCache", clock=self._clock
name="_JoinedHostsCache", clock=self.clock
)

def notify_event_un_partial_stated(self, event_id: str) -> None:
@@ -817,9 +817,7 @@ class StateStorageController:
state_group = object()

assert state_group is not None
with Measure(
self._clock, name="get_joined_hosts", server_name=self.server_name
):
with Measure(self.clock, name="get_joined_hosts", server_name=self.server_name):
return await self._get_joined_hosts(
room_id, state_group, state_entry=state_entry
)

@@ -62,7 +62,6 @@ from synapse.logging.context import (
make_deferred_yieldable,
)
from synapse.metrics import SERVER_NAME_LABEL, register_threadpool
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.background_updates import BackgroundUpdater
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.types import Connection, Cursor, SQLQueryParameters
@@ -638,12 +637,17 @@ class DatabasePool:
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
self.hs.run_as_background_process,
"upsert_safety_check",
self.server_name,
self._check_safe_to_upsert,
)

def stop_background_updates(self) -> None:
"""
Stops the database from running any further background updates.
"""
self.updates.shutdown()

def name(self) -> str:
"Return the name of this database"
return self._database_config.name
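Taken together with the `BackgroundUpdater.shutdown` hunk earlier, this gives a two-step stop path: the pool's `stop_background_updates` delegates to the updater, which disables itself and drops its registered handlers. Condensed from the hunks in this diff (handler types are assumptions):

```python
# Condensed from the hunks above; field types are assumed, not verbatim.
from typing import Callable, Dict

class BackgroundUpdaterSketch:
    def __init__(self) -> None:
        self.enabled = True
        self._background_update_handlers: Dict[str, Callable[[], None]] = {}

    def shutdown(self) -> None:
        """Stop any further background updates from happening."""
        self.enabled = False
        self._background_update_handlers.clear()

class DatabasePoolSketch:
    def __init__(self) -> None:
        self.updates = BackgroundUpdaterSketch()

    def stop_background_updates(self) -> None:
        """Stops the database from running any further background updates."""
        self.updates.shutdown()
```
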
@@ -681,9 +685,8 @@ class DatabasePool:
if background_update_names:
self._clock.call_later(
15.0,
run_as_background_process,
self.hs.run_as_background_process,
"upsert_safety_check",
self.server_name,
self._check_safe_to_upsert,
)


@@ -751,7 +751,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
"instance_name": self._instance_name,
"cache_func": cache_name,
"keys": keys,
"invalidation_ts": self._clock.time_msec(),
"invalidation_ts": self.clock.time_msec(),
},
)

@@ -778,7 +778,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
assert self._cache_id_gen is not None

stream_ids = self._cache_id_gen.get_next_mult_txn(txn, len(key_tuples))
ts = self._clock.time_msec()
ts = self.clock.time_msec()
txn.call_after(self.hs.get_notifier().on_new_replication_data)
self.db_pool.simple_insert_many_txn(
txn,
@@ -830,7 +830,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
next_interval = REGULAR_CLEANUP_INTERVAL_MS

self.hs.get_clock().call_later(
next_interval / 1000, self._clean_up_cache_invalidation_wrapper
next_interval / 1000,
self._clean_up_cache_invalidation_wrapper,
)

async def _clean_up_batch_of_old_cache_invalidations(

@@ -77,7 +77,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
return

before_ts = (
self._clock.time_msec() - self.hs.config.server.redaction_retention_period
self.clock.time_msec() - self.hs.config.server.redaction_retention_period
)

# We fetch all redactions that:

@@ -438,10 +438,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
cache_name="client_ip_last_seen",
server_name=self.server_name,
max_size=50000,
clock=hs.get_clock(),
)

if hs.config.worker.run_background_tasks and self.user_ips_max_age:
self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
self.clock.looping_call(self._prune_old_user_ips, 5 * 1000)

if self._update_on_this_worker:
# This is the designated worker that can write to the client IP
@@ -452,11 +453,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
Tuple[str, str, str], Tuple[str, Optional[str], int]
] = {}

self._client_ip_looper = self._clock.looping_call(
self._update_client_ips_batch, 5 * 1000
)
self.hs.get_clock().add_system_event_trigger(
"before", "shutdown", self._update_client_ips_batch
self.clock.looping_call(self._update_client_ips_batch, 5 * 1000)
hs.register_async_shutdown_handler(
phase="before",
eventType="shutdown",
shutdown_func=self._update_client_ips_batch,
)

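The client-IP batch flush moves from a generic `"before", "shutdown"` trigger to `register_async_shutdown_handler`, whose handlers are presumably awaited so buffered writes land before teardown. A runnable toy illustrating that ordering (all names below are illustrative, not Synapse's):

```python
# Runnable toy, not Synapse code: shows why an awaited "before shutdown"
# handler matters for flushing buffered client IPs.
import asyncio
from typing import Awaitable, Callable, List, Tuple

class TinyServer:
    def __init__(self) -> None:
        self._handlers: List[Tuple[str, str, Callable[[], Awaitable[None]]]] = []
        self.pending_ips = [("@user:example.org", "203.0.113.7")]

    def register_async_shutdown_handler(
        self, *, phase: str, eventType: str,
        shutdown_func: Callable[[], Awaitable[None]],
    ) -> None:
        self._handlers.append((phase, eventType, shutdown_func))

    async def _update_client_ips_batch(self) -> None:
        # Stand-in for the real batch write: flush everything buffered.
        self.pending_ips.clear()

    async def shutdown(self) -> None:
        # "before"-phase handlers are awaited ahead of actual teardown.
        for phase, _event, func in self._handlers:
            if phase == "before":
                await func()

async def main() -> None:
    server = TinyServer()
    server.register_async_shutdown_handler(
        phase="before",
        eventType="shutdown",
        shutdown_func=server._update_client_ips_batch,
    )
    await server.shutdown()
    assert not server.pending_ips  # nothing lost on exit

asyncio.run(main())
```
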
@wrap_as_background_process("prune_old_user_ips")
@@ -492,7 +493,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
)
"""

timestamp = self._clock.time_msec() - self.user_ips_max_age
timestamp = self.clock.time_msec() - self.user_ips_max_age

def _prune_old_user_ips_txn(txn: LoggingTransaction) -> None:
txn.execute(sql, (timestamp,))
@@ -628,7 +629,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
return

if not now:
now = int(self._clock.time_msec())
now = int(self.clock.time_msec())
key = (user_id, access_token, ip)

try:

@@ -96,7 +96,8 @@ class DeviceInboxWorkerStore(SQLBaseStore):
] = ExpiringCache(
cache_name="last_device_delete_cache",
server_name=self.server_name,
clock=self._clock,
hs=hs,
clock=self.clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
)
@@ -154,7 +155,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
)

if hs.config.worker.run_background_tasks:
self._clock.looping_call(
self.clock.looping_call(
run_as_background_process,
DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS,
"_delete_old_federation_inbox_rows",
@@ -826,7 +827,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
)

async with self._to_device_msg_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
now_ms = self.clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
)
@@ -881,7 +882,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
)

async with self._to_device_msg_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
now_ms = self.clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_from_remote_to_device_inbox",
add_messages_txn,
@@ -1002,7 +1003,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
# We delete at most 100 rows that are older than
# DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
delete_before_ts = (
self._clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
)
sql = """
WITH to_delete AS (
@@ -1032,7 +1033,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):

# We sleep a bit so that we don't hammer the database in a tight
# loop first time we run this.
await self._clock.sleep(1)
await self.clock.sleep(1)

async def get_devices_with_messages(
self, user_id: str, device_ids: StrCollection

@@ -195,7 +195,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
)

if hs.config.worker.run_background_tasks:
self._clock.looping_call(
self.clock.looping_call(
self._prune_old_outbound_device_pokes, 60 * 60 * 1000
)

@@ -1390,7 +1390,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
table="device_lists_remote_resync",
keyvalues={"user_id": user_id},
values={},
insertion_values={"added_ts": self._clock.time_msec()},
insertion_values={"added_ts": self.clock.time_msec()},
)

await self.db_pool.runInteraction(
@@ -1601,7 +1601,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
that user when the destination comes back. It doesn't matter which device
we keep.
"""
yesterday = self._clock.time_msec() - prune_age
yesterday = self.clock.time_msec() - prune_age

def _prune_txn(txn: LoggingTransaction) -> None:
# look for (user, destination) pairs which have an update older than
@@ -2086,7 +2086,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
stream_id,
)

now = self._clock.time_msec()
now = self.clock.time_msec()

encoded_context = json_encoder.encode(context)
mark_sent = not self.hs.is_mine_id(user_id)

@@ -1564,7 +1564,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
DELETE FROM e2e_one_time_keys_json
WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6
"""
args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000))
args.append(self.clock.time_msec() - (7 * 24 * 3600 * 1000))
txn.execute(sql, args)

return users, txn.rowcount
@@ -1585,7 +1585,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
None, if there is no such key.
Otherwise, the timestamp before which replacement is allowed without UIA.
"""
timestamp = self._clock.time_msec() + duration_ms
timestamp = self.clock.time_msec() + duration_ms

def impl(txn: LoggingTransaction) -> Optional[int]:
txn.execute(

@@ -167,6 +167,7 @@ class EventFederationWorkerStore(
# Cache of event ID to list of auth event IDs and their depths.
self._event_auth_cache: LruCache[str, List[Tuple[str, int]]] = LruCache(
max_size=500000,
clock=self.hs.get_clock(),
server_name=self.server_name,
cache_name="_event_auth_cache",
size_callback=len,
@@ -176,7 +177,7 @@ class EventFederationWorkerStore(
# index.
self.tests_allow_no_chain_cover_index = True

self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
self.clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)

if isinstance(self.database_engine, PostgresEngine):
self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
@@ -1328,7 +1329,7 @@ class EventFederationWorkerStore(
(
room_id,
current_depth,
self._clock.time_msec(),
self.clock.time_msec(),
BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
limit,
@@ -1841,7 +1842,7 @@ class EventFederationWorkerStore(
last_cause=EXCLUDED.last_cause;
"""

txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))
txn.execute(sql, (room_id, event_id, 1, self.clock.time_msec(), cause))

@trace
async def get_event_ids_with_failed_pull_attempts(
@@ -1905,7 +1906,7 @@ class EventFederationWorkerStore(
),
)

current_time = self._clock.time_msec()
current_time = self.clock.time_msec()

event_ids_with_backoff = {}
for event_id, last_attempt_ts, num_attempts in event_failed_pull_attempts:
@@ -2025,7 +2026,7 @@ class EventFederationWorkerStore(
values={},
insertion_values={
"room_id": event.room_id,
"received_ts": self._clock.time_msec(),
"received_ts": self.clock.time_msec(),
"event_json": json_encoder.encode(event.get_dict()),
"internal_metadata": json_encoder.encode(
event.internal_metadata.get_dict()
@@ -2299,7 +2300,7 @@ class EventFederationWorkerStore(
# If there is nothing in the staging area default it to 0.
age = 0
if received_ts is not None:
age = self._clock.time_msec() - received_ts
age = self.clock.time_msec() - received_ts

return count, age


@@ -95,6 +95,8 @@ from typing import (

import attr

from twisted.internet.task import LoopingCall

from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -254,6 +256,8 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st


class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
_background_tasks: List[LoopingCall] = []

def __init__(
self,
database: DatabasePool,
@@ -263,7 +267,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
super().__init__(database, db_conn, hs)

# Track when the process started.
self._started_ts = self._clock.time_msec()
self._started_ts = self.clock.time_msec()

# These get correctly set by _find_stream_orderings_for_times_txn
self.stream_ordering_month_ago: Optional[int] = None
@@ -273,18 +277,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
self._find_stream_orderings_for_times_txn(cur)
cur.close()

self.find_stream_orderings_looping_call = self._clock.looping_call(
self._find_stream_orderings_for_times, 10 * 60 * 1000
)
self.clock.looping_call(self._find_stream_orderings_for_times, 10 * 60 * 1000)

self._rotate_count = 10000
self._doing_notif_rotation = False
if hs.config.worker.run_background_tasks:
self._rotate_notif_loop = self._clock.looping_call(
self._rotate_notifs, 30 * 1000
)
self.clock.looping_call(self._rotate_notifs, 30 * 1000)

self._clear_old_staging_loop = self._clock.looping_call(
self.clock.looping_call(
self._clear_old_push_actions_staging, 30 * 60 * 1000
)

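The new class-level `_background_tasks: List[LoopingCall]`, together with the switch to bare `self.clock.looping_call(...)` calls, suggests loop handles are tracked centrally rather than stashed ad hoc on `self` (as `find_stream_orderings_looping_call` and `_rotate_notif_loop` were above). A sketch of that pattern with Twisted; the wiring here is an assumption, not the diff's actual code:

```python
# Sketch only: assumes Twisted's LoopingCall; Synapse's real wiring differs.
from typing import Callable, List

from twisted.internet.task import LoopingCall

class LoopingCallTracker:
    def __init__(self) -> None:
        self._background_tasks: List[LoopingCall] = []

    def looping_call(self, f: Callable[[], None], interval_ms: int) -> LoopingCall:
        call = LoopingCall(f)
        call.start(interval_ms / 1000.0, now=False)
        # Keep the handle so every loop can be stopped at shutdown instead
        # of each caller storing its own attribute (the old pattern above).
        self._background_tasks.append(call)
        return call

    def shutdown(self) -> None:
        for call in self._background_tasks:
            if call.running:
                call.stop()
```
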
@@ -1190,7 +1190,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
is_highlight, # highlight column
int(count_as_unread), # unread column
thread_id, # thread_id column
self._clock.time_msec(), # inserted_ts column
self.clock.time_msec(), # inserted_ts column
)

await self.db_pool.simple_insert_many(
@@ -1241,14 +1241,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
def _find_stream_orderings_for_times_txn(self, txn: LoggingTransaction) -> None:
logger.info("Searching for stream ordering 1 month ago")
self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn(
txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000
txn, self.clock.time_msec() - 30 * 24 * 60 * 60 * 1000
)
logger.info(
"Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago
)
logger.info("Searching for stream ordering 1 day ago")
self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn(
txn, self._clock.time_msec() - 24 * 60 * 60 * 1000
txn, self.clock.time_msec() - 24 * 60 * 60 * 1000
)
logger.info(
"Found stream ordering 1 day ago: it's %d", self.stream_ordering_day_ago
@@ -1787,7 +1787,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas

# We delete anything more than an hour old, on the assumption that we'll
# never take more than an hour to persist an event.
delete_before_ts = self._clock.time_msec() - 60 * 60 * 1000
delete_before_ts = self.clock.time_msec() - 60 * 60 * 1000

if self._started_ts > delete_before_ts:
# We need to wait for at least an hour before we started deleting,
@@ -1824,7 +1824,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
return

# We sleep to ensure that we don't overwhelm the DB.
await self._clock.sleep(1.0)
await self.clock.sleep(1.0)

async def get_push_actions_for_user(
self,

@@ -730,7 +730,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
WHERE ? <= event_id AND event_id <= ?
"""

txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))
txn.execute(sql, (self.clock.time_msec(), last_event_id, upper_event_id))

self.db_pool.updates._background_update_progress_txn(
txn, "redactions_received_ts", {"last_event_id": upper_event_id}

Some files were not shown because too many files have changed in this diff