Mirror of https://github.com/element-hq/synapse.git
Synced 2025-12-11 01:40:27 +00:00

Compare commits: release-v1...anoa/remov (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 72d589ff1e | |
| | 88a0596565 | |
```diff
@@ -173,13 +173,11 @@ steps:
       queue: "medium"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash /synapse_sytest.sh"
+      - "bash .buildkite/synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
-          always-pull: true
-          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1
@@ -194,13 +192,11 @@ steps:
       POSTGRES: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash /synapse_sytest.sh"
+      - "bash .buildkite/synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
-          always-pull: true
-          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1
@@ -216,13 +212,11 @@ steps:
       WORKERS: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash /synapse_sytest.sh"
+      - "bash .buildkite/synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
-          always-pull: true
-          workdir: "/src"
     soft_fail: true
     retry:
       automatic:
```
`.buildkite/synapse_sytest.sh` (new file, 145 lines)

```bash
#!/bin/bash
#
# Fetch sytest, and then run the tests for synapse. The entrypoint for the
# sytest-synapse docker images.

set -ex

if [ -n "$BUILDKITE" ]
then
    SYNAPSE_DIR=`pwd`
else
    SYNAPSE_DIR="/src"
fi

# Attempt to find a sytest to use.
# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
if [ -d "/sytest" ]; then
    # If the user has mounted in a SyTest checkout, use that.
    echo "Using local sytests..."

    # create ourselves a working directory and dos2unix some scripts therein
    mkdir -p /work/jenkins
    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
        dos2unix -n "/sytest/$i" "/work/$i"
    done
    ln -sf /sytest/tests /work
    ln -sf /sytest/keys /work
    SYTEST_LIB="/sytest/lib"
else
    if [ -n "$BUILDKITE_BRANCH" ]
    then
        branch_name=$BUILDKITE_BRANCH
    else
        # Otherwise, try and find out what branch the Synapse checkout is using. Fall back to develop if it's not a branch.
        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
    fi

    # Try and fetch the branch
    echo "Trying to get same-named sytest branch..."
    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
        # Probably a 404, fall back to develop
        echo "Using develop instead..."
        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
    }

    mkdir -p /work
    tar -C /work --strip-components=1 -xf sytest.tar.gz
    SYTEST_LIB="/work/lib"
fi

cd /work

# PostgreSQL setup
if [ -n "$POSTGRES" ]
then
    export PGUSER=postgres
    export POSTGRES_DB_1=pg1
    export POSTGRES_DB_2=pg2

    # Start the database
    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres

    # Use the Jenkins script to write out the configuration for a PostgreSQL-using Synapse
    jenkins/prep_sytest_for_postgres.sh

    # Make the test databases for the two Synapse servers that will be spun up
    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
fi

if [ -n "$OFFLINE" ]; then
    # if we're in offline mode, just put synapse into the virtualenv, and
    # hope that the deps are up-to-date.
    #
    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
    # so we just run setup.py explicitly.)
    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
else
    # We've already created the virtualenv, but lets double check we have all
    # deps.
    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
    /venv/bin/pip install -q --upgrade --no-cache-dir \
        lxml psycopg2 coverage codecov tap.py

    # Make sure all Perl deps are installed -- this is done in the docker build
    # so will only install packages added since the last Docker build
    ./install-deps.pl
fi

# Run the tests
>&2 echo "+++ Running tests"

RUN_TESTS=(
    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
)

TEST_STATUS=0

if [ -n "$WORKERS" ]; then
    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
else
    RUN_TESTS+=(-I Synapse)
fi

"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?

if [ $TEST_STATUS -ne 0 ]; then
    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
else
    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
fi

>&2 echo "--- Copying assets"

# Copy out the logs
mkdir -p /logs
cp results.tap /logs/results.tap
rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"

# Upload coverage to codecov and upload files, if running on Buildkite
if [ -n "$BUILDKITE" ]
then
    /venv/bin/coverage combine || true
    /venv/bin/coverage xml || true
    /venv/bin/codecov -X gcov -f coverage.xml

    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
    tar xvf buildkite.tar.gz
    chmod +x ./buildkite-agent

    # Upload the files
    ./buildkite-agent artifact upload "/logs/**/*.log*"
    ./buildkite-agent artifact upload "/logs/results.tap"

    if [ $TEST_STATUS -ne 0 ]; then
        # Annotate, if failure
        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
    fi
fi

exit $TEST_STATUS
```
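On failure, the script feeds `results.tap` through `.buildkite/format_tap.py` to build a Buildkite annotation. That helper is not part of this diff, so as a rough illustration only, here is a hypothetical sketch of the kind of TAP summarising it performs (not the actual file's contents):

```python
import sys

def summarise_tap(path, label):
    """Collect the failing tests from a TAP results file into a short
    summary (hypothetical sketch; the real format_tap.py may differ)."""
    failures = []
    with open(path) as f:
        for line in f:
            # TAP marks failing tests with a leading "not ok <n> - <name>"
            if line.startswith("not ok"):
                failures.append(line.strip())
    header = "%s: %d failing test(s)" % (label, len(failures))
    return "\n".join([header] + failures)

if __name__ == "__main__":
    print(summarise_tap(sys.argv[1], sys.argv[2]))
```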
`CHANGES.md` (152 lines changed)

```diff
@@ -1,155 +1,3 @@
-Synapse 1.2.1 (2019-07-26)
-==========================
-
-Security update
----------------
-
-This release includes *four* security fixes:
-
-- Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767))
-- Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961))
-- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @Dylanger for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744))
-- Fix a vulnerability where a federated server could spoof read-receipts from
-  users on other servers. Thanks to @Dylanger for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743))
-
-Additionally, the following fix was in Synapse **1.2.0**, but was not correctly
-identified during the original release:
-
-- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
-
-Synapse 1.2.0 (2019-07-25)
-==========================
-
-No significant changes.
-
-
-Synapse 1.2.0rc2 (2019-07-24)
-=============================
-
-Bugfixes
---------
-
-- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734))
-
-
-Synapse 1.2.0rc1 (2019-07-22)
-=============================
-
-Security fixes
---------------
-
-This update included a security fix which was initially incorrectly flagged as
-a regular bug fix.
-
-- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
-
-Features
---------
-
-- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712))
-- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589))
-- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597))
-- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613))
-- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623))
-- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626))
-- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660))
-- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674))
-- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714))
-
-
-Bugfixes
---------
-
-- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609))
-- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621))
-- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629))
-- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638))
-- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644))
-- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654))
-- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658))
-- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707))
-
-
-Updates to the Docker image
----------------------------
-
-- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619))
-- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620))
-
-
-Improved Documentation
-----------------------
-
-- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397))
-- --no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651))
-- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661))
-- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625))
-
-
-Internal Changes
-----------------
-
-- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617))
-- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611))
-- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616))
-- Remove dead code for persisting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622))
-- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627))
-- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628))
-- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630))
-- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636))
-- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637))
-- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639))
-- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640))
-- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641))
-- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642))
-- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643))
-- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645))
-- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655))
-- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656))
-- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657))
-- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659))
-- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664))
-- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673))
-- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689))
-- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703))
-
-
-Synapse 1.1.0 (2019-07-04)
-==========================
-
-As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
-See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
-
-This release also deprecates the use of environment variables to configure the
-docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
-for more details.
-
-No changes since 1.1.0rc2.
-
-
-Synapse 1.1.0rc2 (2019-07-03)
-=============================
-
-Bugfixes
---------
-
-- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
-- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
-- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
-
-
-Internal Changes
-----------------
-
-- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
-
 Synapse 1.1.0rc1 (2019-07-02)
 =============================
 
```
```diff
@@ -30,10 +30,11 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
-continuous integration. Buildkite builds need to be authorised by a
-maintainer. If your change breaks the build, this will be shown in GitHub, so
-please keep an eye on the pull request for feedback.
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
+<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
+Buildkite builds need to be authorised by a maintainer. If your change breaks
+the build, this will be shown in GitHub, so please keep an eye on the pull
+request for feedback.
 
 To run unit tests in a local development environment, you can use:
 
```
```diff
@@ -69,21 +70,13 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
 entry. These are managed by Towncrier
 (https://github.com/hawkowl/towncrier).
 
-To create a changelog entry, make a new file in the ``changelog.d`` file named
-in the format of ``PRnumber.type``. The type can be one of the following:
-
-* ``feature``.
-* ``bugfix``.
-* ``docker`` (for updates to the Docker image).
-* ``doc`` (for updates to the documentation).
-* ``removal`` (also used for deprecations).
-* ``misc`` (for internal-only changes).
-
-The content of the file is your changelog entry, which should be a short
-description of your change in the same style as the rest of our `changelog
-<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
-contain Markdown formatting, and should end with a full stop ('.') for
-consistency.
+To create a changelog entry, make a new file in the ``changelog.d``
+file named in the format of ``PRnumber.type``. The type can be
+one of ``feature``, ``bugfix``, ``removal`` (also used for
+deprecations), or ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which can contain Markdown
+formatting. The entry should end with a full stop ('.') for consistency.
 
 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!
```
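The two newsfragments added later in this diff (`changelog.d/5552.misc` and `changelog.d/5596.bugfix`) follow exactly this naming scheme. A minimal sketch of creating one (the PR number 1234 and the entry text are invented for illustration):

```python
from pathlib import Path

# A newsfragment for a hypothetical PR #1234, classified as a bugfix.
# Towncrier collates changelog.d/1234.bugfix into CHANGES.md at release time.
Path("changelog.d/1234.bugfix").write_text(
    "Fix an example bug in an example subsystem.\n"
)
```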
```diff
@@ -7,7 +7,6 @@ include demo/README
 include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
-include sytest-blacklist
 
 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.sql.postgres
```
```diff
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
 
     virtualenv -p python3 env
     source env/bin/activate
-    python -m pip install --no-use-pep517 -e .[all]
+    python -m pip install --no-pep-517 -e .[all]
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
```
```diff
@@ -49,13 +49,6 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
-Upgrading to v1.2.0
-===================
-
-Some counter metrics have been renamed, with the old names deprecated. See
-`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
-for details.
-
 Upgrading to v1.1.0
 ===================
 
```
|||||||
1
changelog.d/5552.misc
Normal file
1
changelog.d/5552.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Update github templates.
|
||||||
`changelog.d/5596.bugfix` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. Users are advised to remove `SYNAPSE_SMTP_HOST`, `SYNAPSE_SMTP_PORT`, `SYNAPSE_SMTP_USER`, `SYNAPSE_SMTP_PASSWORD` and `SYNAPSE_SMTP_FROM` environment variables from their docker run commands.
```
```diff
@@ -1,7 +1,7 @@
 # Example log_config file for synapse. To enable, point `log_config` to it in
 # `homeserver.yaml`, and restart synapse.
 #
 # This configuration will produce similar results to the defaults within
 # synapse, but can be edited to give more flexibility.
 
 version: 1
@@ -12,7 +12,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.logging.context.LoggingContextFilter
+    (): synapse.util.logcontext.LoggingContextFilter
     request: ""
 
 handlers:
@@ -35,7 +35,7 @@ handlers:
 root:
   level: INFO
  handlers: [console] # to use file handler instead, switch to [file]
 
 loggers:
   synapse:
     level: INFO
```
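The `()` key in these configs is standard `logging.config.dictConfig` syntax: it names a factory callable for the filter, and any remaining keys (here `request`) are passed to it as keyword arguments. A self-contained sketch of the mechanism, with a hypothetical `RequestFilter` standing in for synapse's `LoggingContextFilter`:

```python
import logging
import logging.config

class RequestFilter(logging.Filter):
    """Stand-in for LoggingContextFilter: stamps every record with a
    `request` attribute so a %(request)s formatter field always resolves."""
    def __init__(self, request=""):
        super().__init__()
        self.request = request

    def filter(self, record):
        record.request = self.request
        return True

logging.config.dictConfig({
    "version": 1,
    "formatters": {
        "precise": {"format": "%(asctime)s - %(name)s - %(request)s - %(message)s"},
    },
    "filters": {
        # "()" marks a custom factory, exactly as in the yaml above
        "context": {"()": RequestFilter, "request": ""},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "precise",
            "filters": ["context"],
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
})

logging.getLogger("demo").info("hello")
```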
```diff
@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid
 
 from synapse.app.homeserver import SynapseHomeServer
 
-# from synapse.logging.utils import log_function
+# from synapse.util.logutils import log_function
 
 from twisted.internet import reactor, defer
 from twisted.python import log
```
```diff
@@ -8,7 +8,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.logging.context.LoggingContextFilter
+    (): synapse.util.logcontext.LoggingContextFilter
     request: ""
 
 handlers:
```
`debian/changelog` (vendored, 26 lines changed)

```diff
@@ -1,31 +1,9 @@
-matrix-synapse-py3 (1.2.1) stable; urgency=medium
-
-  * New synapse release 1.2.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Fri, 26 Jul 2019 11:32:47 +0100
-
-matrix-synapse-py3 (1.2.0) stable; urgency=medium
-
-  [ Amber Brown ]
-  * Update logging config defaults to match API changes in Synapse.
-
-  [ Richard van der Hoff ]
-  * Add Recommends and Depends for some libraries which you probably want.
-
-  [ Synapse Packaging team ]
-  * New synapse release 1.2.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Jul 2019 14:10:07 +0100
-
-matrix-synapse-py3 (1.1.0) stable; urgency=medium
+matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
 
   [ Silke Hofstra ]
   * Include systemd-python to allow logging to the systemd journal.
 
-  [ Synapse Packaging team ]
-  * New synapse release 1.1.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 04 Jul 2019 11:43:41 +0100
+ -- Silke Hofstra <silke@slxh.eu>  Wed, 29 May 2019 09:45:29 +0200
 
 matrix-synapse-py3 (1.0.0) stable; urgency=medium
 
```
`debian/control` (vendored, 7 lines changed)

```diff
@@ -2,20 +2,16 @@ Source: matrix-synapse-py3
 Section: contrib/python
 Priority: extra
 Maintainer: Synapse Packaging team <packages@matrix.org>
-# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
 Build-Depends:
  debhelper (>= 9),
  dh-systemd,
  dh-virtualenv (>= 1.1),
- libsystemd-dev,
- libpq-dev,
  lsb-release,
  python3-dev,
  python3,
  python3-setuptools,
  python3-pip,
  python3-venv,
- libsqlite3-dev,
  tar,
 Standards-Version: 3.9.8
 Homepage: https://github.com/matrix-org/synapse
@@ -32,12 +28,9 @@ Depends:
  debconf,
  python3-distutils|libpython3-stdlib (<< 3.6),
  ${misc:Depends},
- ${shlibs:Depends},
  ${synapse:pydepends},
 # some of our scripts use perl, but none of them are important,
 # so we put perl:Depends in Suggests rather than Depends.
-Recommends:
- ${shlibs1:Recommends},
 Suggests:
  sqlite3,
  ${perl:Depends},
```
`debian/log.yaml` (vendored, 2 lines changed)

```diff
@@ -7,7 +7,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.logging.context.LoggingContextFilter
+    (): synapse.util.logcontext.LoggingContextFilter
     request: ""
 
 handlers:
```
`debian/rules` (vendored, 14 lines changed)

```diff
@@ -3,29 +3,15 @@
 # Build Debian package using https://github.com/spotify/dh-virtualenv
 #
 
-# assume we only have one package
-PACKAGE_NAME:=`dh_listpackages`
-
 override_dh_systemd_enable:
 	dh_systemd_enable --name=matrix-synapse
 
 override_dh_installinit:
 	dh_installinit --name=matrix-synapse
 
-# we don't really want to strip the symbols from our object files.
 override_dh_strip:
 
 override_dh_shlibdeps:
-	# make the postgres package's dependencies a recommendation
-	# rather than a hard dependency.
-	find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
-	    xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
-	        -pshlibs1 -dRecommends
-
-	# all the other dependencies can be normal 'Depends' requirements,
-	# except for PIL's, which is self-contained and which confuses
-	# dpkg-shlibdeps.
-	dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
 
 override_dh_virtualenv:
 	./debian/build_virtualenv
```
```diff
@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
 ###
 ### Stage 0: builder
 ###
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
 
 # install the OS build deps
 
@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 ### Stage 1: runtime
 ###
 
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
 
 # xmlsec is required for saml support
 RUN apk add --no-cache --virtual .runtime_deps \
```
```diff
@@ -43,9 +43,6 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
 FROM ${distro}
 
 # Install the build dependencies
-#
-# NB: keep this list in sync with the list of build-deps in debian/control
-# TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
```
```diff
@@ -2,11 +2,11 @@ version: 1
 
 formatters:
   precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
 
 filters:
   context:
-    (): synapse.logging.context.LoggingContextFilter
+    (): synapse.util.logcontext.LoggingContextFilter
     request: ""
 
 handlers:
```
```diff
@@ -1,4 +1,4 @@
-Log Contexts
+Log contexts
 ============
 
 .. contents::
@@ -12,7 +12,7 @@ record.
 Logcontexts are also used for CPU and database accounting, so that we can track
 which requests were responsible for high CPU use or database activity.
 
-The ``synapse.logging.context`` module provides facilities for managing the
+The ``synapse.util.logcontext`` module provides facilities for managing the
 current log context (as well as providing the ``LoggingContextFilter`` class).
 
 Deferreds make the whole thing complicated, so this document describes how it
```
```diff
@@ -27,19 +27,19 @@ found them:
 
 .. code:: python
 
-    from synapse.logging import context   # omitted from future snippets
+    from synapse.util import logcontext   # omitted from future snippets
 
     def handle_request(request_id):
-        request_context = context.LoggingContext()
+        request_context = logcontext.LoggingContext()
 
-        calling_context = context.LoggingContext.current_context()
-        context.LoggingContext.set_current_context(request_context)
+        calling_context = logcontext.LoggingContext.current_context()
+        logcontext.LoggingContext.set_current_context(request_context)
         try:
             request_context.request = request_id
             do_request_handling()
             logger.debug("finished")
         finally:
-            context.LoggingContext.set_current_context(calling_context)
+            logcontext.LoggingContext.set_current_context(calling_context)
 
     def do_request_handling():
         logger.debug("phew")   # this will be logged against request_id
@@ -51,7 +51,7 @@ written much more succinctly as:
 .. code:: python
 
     def handle_request(request_id):
-        with context.LoggingContext() as request_context:
+        with logcontext.LoggingContext() as request_context:
             request_context.request = request_id
             do_request_handling()
             logger.debug("finished")
@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:
 
     @defer.inlineCallbacks
     def handle_request(request_id):
-        with context.LoggingContext() as request_context:
+        with logcontext.LoggingContext() as request_context:
             request_context.request = request_id
             yield do_request_handling()
             logger.debug("finished")
@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
 external code. We need to make it follow our rules.
 
 The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
-``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
+``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
 which returns a deferred which will run its callbacks after a given number of
 seconds. That might look like:
```
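The snippet the text refers to falls between the quoted hunks. A sketch consistent with the surrounding prose (the name `get_sleep_deferred` is taken from the later example; treat the details as an assumption, not the file's actual contents):

```python
from twisted.internet import defer, reactor

from synapse.util.logcontext import PreserveLoggingContext

def get_sleep_deferred(seconds):
    # A deferred which fires after the given delay. Its callbacks run from
    # the reactor, i.e. with the sentinel logcontext, so by itself it does
    # not follow the logcontext rules.
    d = defer.Deferred()
    reactor.callLater(seconds, d.callback, None)
    return d

@defer.inlineCallbacks
def sleep(seconds):
    # Set our logcontext aside while waiting, and restore it afterwards.
    with PreserveLoggingContext():
        yield get_sleep_deferred(seconds)
```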
```diff
@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
 This technique works equally for external functions which return deferreds,
 or deferreds we have made ourselves.
 
-You can also use ``context.make_deferred_yieldable``, which just does the
+You can also use ``logcontext.make_deferred_yieldable``, which just does the
 boilerplate for you, so the above could be written:
 
 .. code:: python
 
     def sleep(seconds):
-        return context.make_deferred_yieldable(get_sleep_deferred(seconds))
+        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
 
 
 Fire-and-forget
```
```diff
@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
 that might be fixed by setting a different logcontext via a ``with
 LoggingContext(...)`` in ``background_operation``).
 
-The second option is to use ``context.run_in_background``, which wraps a
+The second option is to use ``logcontext.run_in_background``, which wraps a
 function so that it doesn't reset the logcontext even when it returns an
 incomplete deferred, and adds a callback to the returned deferred to reset the
 logcontext. In other words, it turns a function that follows the Synapse rules
@@ -293,7 +293,7 @@ It can be used like this:
     def do_request_handling():
         yield foreground_operation()
 
-        context.run_in_background(background_operation)
+        logcontext.run_in_background(background_operation)
 
         # this will now be logged against the request context
         logger.debug("Request handling complete")
```
```diff
@@ -332,7 +332,7 @@ gathered:
         result = yield defer.gatherResults([d1, d2])
 
 In this case particularly, though, option two, of using
-``context.preserve_fn`` almost certainly makes more sense, so that
+``logcontext.preserve_fn`` almost certainly makes more sense, so that
 ``operation1`` and ``operation2`` are both logged against the original
 logcontext. This looks like:
 
@@ -340,8 +340,8 @@ logcontext. This looks like:
 
     @defer.inlineCallbacks
     def do_request_handling():
-        d1 = context.preserve_fn(operation1)()
-        d2 = context.preserve_fn(operation2)()
+        d1 = logcontext.preserve_fn(operation1)()
+        d2 = logcontext.preserve_fn(operation2)()
 
         with PreserveLoggingContext():
             result = yield defer.gatherResults([d1, d2])
```
```diff
@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:
 .. code:: python
 
     def handle_request(request_id):
-        with context.LoggingContext() as request_context:
+        with logcontext.LoggingContext() as request_context:
             request_context.request = request_id
             d = do_request_handling()
 
@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.
 
 The business of a Deferred which runs its callbacks in the original logcontext
 isn't hard to achieve — we have it today, in the shape of
-``context._PreservingContextDeferred``:
+``logcontext._PreservingContextDeferred``:
 
 .. code:: python
 
```
```diff
@@ -59,108 +59,6 @@ How to monitor Synapse metrics using Prometheus
 Restart Prometheus.
 
 
-Renaming of metrics & deprecation of old names in 1.2
------------------------------------------------------
-
-Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
-upstream ``prometheus_client``. The old names are considered deprecated and will
-be removed in a future version of Synapse.
-
-| New Name | Old Name |
-|---|---|
-| python_gc_objects_collected_total | python_gc_objects_collected |
-| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
-| python_gc_collections_total | python_gc_collections |
-| process_cpu_seconds_total | process_cpu_seconds |
-| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
-| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
-| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
-| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
-| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
-| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
-| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
-| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
-| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
-| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
-| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
-| synapse_background_process_start_count_total | synapse_background_process_start_count |
-| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
-| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
-| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
-| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
-| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
-| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
-| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
-| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
-| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
-| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
-| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
-| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
-| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
-| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
-| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
-| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
-| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
-| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
-| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
-| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
-| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
-| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
-| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
-| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
-| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
-| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
-| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
-| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
-| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
-
-
 Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
 ---------------------------------------------------------------------------------
 
```
|||||||
@@ -1,100 +0,0 @@
-===========
-OpenTracing
-===========
-
-Background
-----------
-
-OpenTracing is a semi-standard being adopted by a number of distributed tracing
-platforms. It is a common api for facilitating vendor-agnostic tracing
-instrumentation. That is, we can use the OpenTracing api and select one of a
-number of tracer implementations to do the heavy lifting in the background.
-Our current selected implementation is Jaeger.
-
-OpenTracing is a tool which gives an insight into the causal relationship of
-work done in and between servers. The servers each track events and report them
-to a centralised server - in Synapse's case: Jaeger. The basic unit used to
-represent events is the span. The span roughly represents a single piece of work
-that was done and the time at which it occurred. A span can have child spans,
-meaning that the work of the child had to be completed for the parent span to
-complete, or it can have follow-on spans which represent work that is undertaken
-as a result of the parent but is not depended on by the parent in order to
-finish.
-
-Since this is undertaken in a distributed environment a request to another
-server, such as an RPC or a simple GET, can be considered a span (a unit of
-work) for the local server. This causal link is what OpenTracing aims to
-capture and visualise. In order to do this, metadata about the local server's
-span, i.e. the 'span context', needs to be included with the request to the
-remote.
-
-It is up to the remote server to decide what it does with the spans
-it creates. This is called the sampling policy and it can be configured
-through Jaeger's settings.
-
-For OpenTracing concepts see
-https://opentracing.io/docs/overview/what-is-tracing/.
-
-For more information about Jaeger's implementation see
-https://www.jaegertracing.io/docs/
-
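The child versus follow-on distinction described above maps directly onto the OpenTracing API. A minimal sketch, assuming the reference ``opentracing`` Python package (with its default no-op tracer, so no Jaeger backend is needed) and invented operation names:

.. code-block:: python

   # Minimal sketch of the two span relationships described above; runs
   # against the no-op tracer that opentracing-python installs by default.
   import opentracing

   tracer = opentracing.tracer

   with tracer.start_span("handle_request") as parent:
       # Child span: the parent cannot finish until this work completes.
       with tracer.start_span("db_lookup", child_of=parent):
           pass  # dependent work

       # Follow-on span: caused by the parent, but not waited on by it.
       followup = tracer.start_span(
           "send_notification",
           references=[opentracing.follows_from(parent.context)],
       )
       followup.finish()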
-======================
-Setting up OpenTracing
-======================
-
-To receive OpenTracing spans, start up a Jaeger server. This can be done
-using docker like so:
-
-.. code-block:: bash
-
-   docker run -d --name jaeger \
-     -p 6831:6831/udp \
-     -p 6832:6832/udp \
-     -p 5778:5778 \
-     -p 16686:16686 \
-     -p 14268:14268 \
-     jaegertracing/all-in-one:1.13
-
-Latest documentation is probably at
-https://www.jaegertracing.io/docs/1.13/getting-started/
-
-
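Before wiring anything else up, the container can be smoke-tested by emitting a single span. A sketch assuming the ``jaeger-client`` Python package and the default agent ports published above:

.. code-block:: python

   # Illustrative smoke test, not part of the deleted doc: send one span to
   # the local Jaeger agent, then look for it at http://localhost:16686.
   import time

   from jaeger_client import Config

   config = Config(
       config={"sampler": {"type": "const", "param": 1}, "logging": True},
       service_name="jaeger-smoke-test",
   )
   tracer = config.initialize_tracer()

   with tracer.start_span("hello-jaeger"):
       pass

   time.sleep(2)  # let the background reporter flush the span
   tracer.close()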
-Enable OpenTracing in Synapse
------------------------------
-
-OpenTracing is not enabled by default. It must be enabled in the homeserver
-config by uncommenting the config options under ``opentracing`` as shown in
-the `sample config <./sample_config.yaml>`_. For example:
-
-.. code-block:: yaml
-
-   opentracing:
-     tracer_enabled: true
-     homeserver_whitelist:
-       - "mytrustedhomeserver.org"
-       - "*.myotherhomeservers.com"
-
-Homeserver whitelisting
------------------------
-
-The homeserver whitelist is configured using regular expressions. A list of regular
-expressions can be given and their union will be compared when propagating any
-span contexts to another homeserver.
-
-Though it's mostly safe to send and receive span contexts to and from
-untrusted users since span contexts are usually opaque ids, it can lead to
-two problems, namely:
-
-- If the span context is marked as sampled by the sending homeserver the receiver will
-  sample it. Therefore two homeservers with wildly different sampling policies
-  could incur higher sampling counts than intended.
-- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse
-  but that doesn't prevent another server sending you baggage which will be logged
-  to OpenTracing's logs.
-
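For illustration, the "union of regular expressions" matching described here can be sketched as follows (the helper name is invented, not Synapse's actual code):

.. code-block:: python

   # Sketch of whitelist matching: a remote server_name must match the union
   # of the configured patterns for span context to be propagated to it.
   import re

   homeserver_whitelist = [
       "mytrustedhomeserver.org",
       ".*\\.myotherhomeservers\\.com",
   ]
   whitelist_re = re.compile("|".join("(?:%s)" % p for p in homeserver_whitelist))

   def should_propagate(server_name):
       return whitelist_re.fullmatch(server_name) is not None

   print(should_propagate("a.myotherhomeservers.com"))  # True
   print(should_propagate("untrusted.example.org"))     # False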
-==================
-Configuring Jaeger
-==================
-
-Sampling strategies can be set as in this document:
-https://www.jaegertracing.io/docs/1.13/sampling/
@@ -11,9 +11,7 @@ a postgres database.
 
 * If you are using the `matrix.org debian/ubuntu
   packages <../INSTALL.md#matrixorg-packages>`_,
-  the necessary python library will already be installed, but you will need to
-  ensure the low-level postgres library is installed, which you can do with
-  ``apt install libpq5``.
+  the necessary libraries will already be installed.
 
 * For other pre-built packages, please consult the documentation from the
   relevant package.
@@ -36,14 +34,9 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user
    su - postgres
    createuser --pwprompt synapse_user
 
-Before you can authenticate with the ``synapse_user``, you must create a
-database that it can access. To create a database, first connect to the database
-with your database user::
-
-   su - postgres
-   psql
-
-and then run::
+The PostgreSQL database used *must* have the correct encoding set, otherwise it
+would not be able to store UTF8 strings. To create a database with the correct
+encoding use, e.g.::
 
    CREATE DATABASE synapse
     ENCODING 'UTF8'
@@ -53,13 +46,7 @@ and then run::
     OWNER synapse_user;
 
 This would create an appropriate database named ``synapse`` owned by the
-``synapse_user`` user (which must already have been created as above).
+``synapse_user`` user (which must already exist).
 
-Note that the PostgreSQL database *must* have the correct encoding set (as
-shown above), otherwise it will not be able to store UTF8 strings.
-
-You may need to enable password authentication so ``synapse_user`` can connect
-to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.
-
 Tuning Postgres
 ===============
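To sanity-check the resulting role, database, and encoding, something like the following works; a sketch assuming ``psycopg2`` is installed and Postgres listens on localhost:

.. code-block:: python

   # Illustrative check (not part of the docs being diffed): connect as the
   # new role and confirm the server encoding really is UTF8.
   import psycopg2

   conn = psycopg2.connect(
       dbname="synapse", user="synapse_user", password="secret", host="localhost"
   )
   with conn.cursor() as cur:
       cur.execute("SHOW SERVER_ENCODING")
       print(cur.fetchone()[0])  # expected: UTF8
   conn.close()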
@@ -48,8 +48,6 @@ Let's assume that we expect clients to connect to our server at
               proxy_set_header X-Forwarded-For $remote_addr;
           }
       }
 
-  Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
-
 * Caddy::
 
@@ -786,17 +786,6 @@ uploads_path: "DATADIR/uploads"
 #  renew_at: 1w
 #  renew_email_subject: "Renew your %(app)s account"
 
-# Time that a user's session remains valid for, after they log in.
-#
-# Note that this is not currently compatible with guest logins.
-#
-# Note also that this is calculated at login time: changes are not applied
-# retrospectively to users who have already logged in.
-#
-# By default, this is infinite.
-#
-#session_lifetime: 24h
-
 # The user must provide all of the below types of 3PID when registering.
 #
 #registrations_require_3pid:
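The durations in the removed comments (``24h``, ``1w``) are shorthand strings. As a rough illustration of how such values decode to milliseconds (an invented parser, not Synapse's own implementation):

.. code-block:: python

   # Illustrative parser for duration shorthand like "24h" or "1w"; the unit
   # table is an assumption modelled on the config comments above.
   import re

   UNITS_MS = {"s": 1000, "m": 60000, "h": 3600000, "d": 86400000, "w": 604800000}

   def parse_duration_ms(value):
       match = re.fullmatch(r"(\d+)([smhdw])", value)
       if match is None:
           raise ValueError("expected something like '24h' or '1w': %r" % (value,))
       amount, unit = match.groups()
       return int(amount) * UNITS_MS[unit]

   assert parse_duration_ms("24h") == 86400000
   assert parse_duration_ms("1w") == 7 * 86400000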
@@ -1406,27 +1395,3 @@ password_config:
 #  module: "my_custom_project.SuperRulesSet"
 #  config:
 #    example_option: 'things'
-
-
-## Opentracing ##
-
-# These settings enable opentracing, which implements distributed tracing.
-# This allows you to observe the causal chains of events across servers
-# including requests, key lookups etc., across any server running
-# synapse or any other services which support opentracing
-# (specifically those implemented with Jaeger).
-#
-opentracing:
-    # tracing is disabled by default. Uncomment the following line to enable it.
-    #
-    #enabled: true
-
-    # The list of homeservers we wish to send and receive span contexts and span baggage.
-    # See docs/opentracing.rst
-    # This is a list of regexes which are matched against the server_name of the
-    # homeserver.
-    #
-    # By default, it is empty, so no servers are matched.
-    #
-    #homeserver_whitelist:
-    #  - ".*"
@@ -14,11 +14,6 @@
         name = "Bugfixes"
         showcontent = true
 
-    [[tool.towncrier.type]]
-        directory = "docker"
-        name = "Updates to the Docker image"
-        showcontent = true
-
     [[tool.towncrier.type]]
         directory = "doc"
         name = "Improved Documentation"
@@ -44,8 +39,6 @@ exclude = '''
    | \.git          # root of the project
    | \.tox
    | \.venv
-   | \.env
-   | env
    | _build
    | _trial_temp.*
    | build
@@ -1,12 +0,0 @@
-#!/bin/sh
-#
-# Runs linting scripts over the local Synapse checkout
-# isort - sorts import statements
-# flake8 - lints and finds mistakes
-# black - opinionated code formatter
-
-set -e
-
-isort -y -rc synapse tests scripts-dev scripts
-flake8 synapse tests
-python3 -m black synapse tests scripts-dev scripts
@@ -35,4 +35,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.2.1"
+__version__ = "1.1.0rc1"
@@ -25,13 +25,7 @@ from twisted.internet import defer
 import synapse.types
 from synapse import event_auth
 from synapse.api.constants import EventTypes, JoinRules, Membership
-from synapse.api.errors import (
-    AuthError,
-    Codes,
-    InvalidClientTokenError,
-    MissingClientTokenError,
-    ResourceLimitError,
-)
+from synapse.api.errors import AuthError, Codes, ResourceLimitError
 from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
@@ -69,6 +63,7 @@ class Auth(object):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
+        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
 
         self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
         register_cache("cache", "token_cache", self.token_cache)
@@ -194,17 +189,18 @@ class Auth(object):
         Returns:
             defer.Deferred: resolves to a ``synapse.types.Requester`` object
         Raises:
-            InvalidClientCredentialsError if no user by that token exists or the token
-            is invalid.
-            AuthError if access is denied for the user in the access token
+            AuthError if no user by that token exists or the token is invalid.
         """
+        # Can optionally look elsewhere in the request (e.g. headers)
         try:
             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
                 b"User-Agent", default=[b""]
             )[0].decode("ascii", "surrogateescape")
 
-            access_token = self.get_access_token_from_request(request)
+            access_token = self.get_access_token_from_request(
+                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
+            )
 
             user_id, app_service = yield self._get_appservice_user_id(request)
             if user_id:
@@ -268,12 +264,18 @@ class Auth(object):
                 )
             )
         except KeyError:
-            raise MissingClientTokenError()
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                "Missing access token.",
+                errcode=Codes.MISSING_TOKEN,
+            )
 
     @defer.inlineCallbacks
     def _get_appservice_user_id(self, request):
         app_service = self.store.get_app_service_by_token(
-            self.get_access_token_from_request(request)
+            self.get_access_token_from_request(
+                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
+            )
         )
         if app_service is None:
             defer.returnValue((None, None))
@@ -311,25 +313,13 @@ class Auth(object):
             `token_id` (int|None): access token id. May be None if guest
             `device_id` (str|None): device corresponding to access token
         Raises:
-            InvalidClientCredentialsError if no user by that token exists or the token
-            is invalid.
+            AuthError if no user by that token exists or the token is invalid.
         """
 
         if rights == "access":
             # first look in the database
             r = yield self._look_up_user_by_access_token(token)
             if r:
-                valid_until_ms = r["valid_until_ms"]
-                if (
-                    valid_until_ms is not None
-                    and valid_until_ms < self.clock.time_msec()
-                ):
-                    # there was a valid access token, but it has expired.
-                    # soft-logout the user.
-                    raise InvalidClientTokenError(
-                        msg="Access token has expired", soft_logout=True
-                    )
-
                 defer.returnValue(r)
 
         # otherwise it needs to be a valid macaroon
@@ -341,7 +331,11 @@ class Auth(object):
             if not guest:
                 # non-guest access tokens must be in the database
                 logger.warning("Unrecognised access token - not in store.")
-                raise InvalidClientTokenError()
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                    "Unrecognised access token.",
+                    errcode=Codes.UNKNOWN_TOKEN,
+                )
 
         # Guest access tokens are not stored in the database (there can
         # only be one access token per guest, anyway).
@@ -356,10 +350,16 @@ class Auth(object):
             # guest tokens.
             stored_user = yield self.store.get_user_by_id(user_id)
             if not stored_user:
-                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                    "Unknown user_id %s" % user_id,
+                    errcode=Codes.UNKNOWN_TOKEN,
+                )
             if not stored_user["is_guest"]:
-                raise InvalidClientTokenError(
-                    "Guest access token used for regular user"
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                    "Guest access token used for regular user",
+                    errcode=Codes.UNKNOWN_TOKEN,
                 )
             ret = {
                 "user": user,
@@ -386,7 +386,11 @@ class Auth(object):
             ValueError,
         ) as e:
             logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
-            raise InvalidClientTokenError("Invalid macaroon passed.")
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                "Invalid macaroon passed.",
+                errcode=Codes.UNKNOWN_TOKEN,
+            )
 
     def _parse_and_validate_macaroon(self, token, rights="access"):
         """Takes a macaroon and tries to parse and validate it. This is cached
@@ -426,7 +430,11 @@ class Auth(object):
                 macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
             )
         except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
-            raise InvalidClientTokenError("Invalid macaroon passed.")
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                "Invalid macaroon passed.",
+                errcode=Codes.UNKNOWN_TOKEN,
+            )
 
         if not has_expiry and rights == "access":
             self.token_cache[token] = (user_id, guest)
@@ -445,14 +453,17 @@ class Auth(object):
             (str) user id
 
         Raises:
-            InvalidClientCredentialsError if there is no user_id caveat in the
-            macaroon
+            AuthError if there is no user_id caveat in the macaroon
         """
         user_prefix = "user_id = "
         for caveat in macaroon.caveats:
             if caveat.caveat_id.startswith(user_prefix):
                 return caveat.caveat_id[len(user_prefix) :]
-        raise InvalidClientTokenError("No user caveat in macaroon")
+        raise AuthError(
+            self.TOKEN_NOT_FOUND_HTTP_STATUS,
+            "No user caveat in macaroon",
+            errcode=Codes.UNKNOWN_TOKEN,
+        )
 
     def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
         """
@@ -516,18 +527,26 @@ class Auth(object):
             "token_id": ret.get("token_id", None),
             "is_guest": False,
             "device_id": ret.get("device_id"),
-            "valid_until_ms": ret.get("valid_until_ms"),
         }
         defer.returnValue(user_info)
 
     def get_appservice_by_req(self, request):
-        token = self.get_access_token_from_request(request)
-        service = self.store.get_app_service_by_token(token)
-        if not service:
-            logger.warn("Unrecognised appservice access token.")
-            raise InvalidClientTokenError()
-        request.authenticated_entity = service.sender
-        return defer.succeed(service)
+        try:
+            token = self.get_access_token_from_request(
+                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
+            )
+            service = self.store.get_app_service_by_token(token)
+            if not service:
+                logger.warn("Unrecognised appservice access token.")
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                    "Unrecognised access token.",
+                    errcode=Codes.UNKNOWN_TOKEN,
+                )
+            request.authenticated_entity = service.sender
+            return defer.succeed(service)
+        except KeyError:
+            raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.")
 
     def is_server_admin(self, user):
         """ Check if the given user is a local server admin.
@@ -606,6 +625,21 @@ class Auth(object):
 
         defer.returnValue(auth_ids)
 
+    def check_redaction(self, room_version, event, auth_events):
+        """Check whether the event sender is allowed to redact the target event.
+
+        Returns:
+            True if the sender is allowed to redact the target event if the
+            target event was created by them.
+            False if the sender is allowed to redact the target event with no
+            further checks.
+
+        Raises:
+            AuthError if the event sender is definitely not allowed to redact
+            the target event.
+        """
+        return event_auth.check_redaction(room_version, event, auth_events)
+
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
         """Check if the user is allowed to edit the room's entry in the
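The three-way contract in that docstring is easiest to see from a call site. A hedged sketch of a caller (the surrounding names are assumptions, not lifted from this diff):

.. code-block:: python

   # Illustrative caller for the check_redaction wrapper added above. A True
   # return means "allowed only if the sender authored the target event", so
   # the ownership check still happens here; False means allowed outright; a
   # definitely-forbidden redaction raises AuthError inside check_redaction.
   from synapse.api.errors import AuthError

   def enforce_redaction(auth, room_version, redaction_event, redacted_event, auth_events):
       if auth.check_redaction(room_version, redaction_event, auth_events):
           if redaction_event.user_id != redacted_event.user_id:
               raise AuthError(403, "You don't have permission to redact events")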
@@ -658,16 +692,20 @@ class Auth(object):
         return bool(query_params) or bool(auth_headers)
 
     @staticmethod
-    def get_access_token_from_request(request):
+    def get_access_token_from_request(request, token_not_found_http_status=401):
         """Extracts the access_token from the request.
 
         Args:
             request: The http request.
+            token_not_found_http_status(int): The HTTP status code to set in the
+                AuthError if the token isn't found. This is used in some of the
+                legacy APIs to change the status code to 403 from the default of
+                401 since some of the old clients depended on auth errors returning
+                403.
         Returns:
             unicode: The access_token
         Raises:
-            MissingClientTokenError: If there isn't a single access_token in the
-            request
+            AuthError: If there isn't an access_token in the request.
         """
 
         auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
@@ -676,20 +714,34 @@ class Auth(object):
             # Try to get the access_token from a "Authorization: Bearer"
             # header
             if query_params is not None:
-                raise MissingClientTokenError(
-                    "Mixing Authorization headers and access_token query parameters."
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Mixing Authorization headers and access_token query parameters.",
+                    errcode=Codes.MISSING_TOKEN,
                 )
             if len(auth_headers) > 1:
-                raise MissingClientTokenError("Too many Authorization headers.")
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Too many Authorization headers.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
             parts = auth_headers[0].split(b" ")
             if parts[0] == b"Bearer" and len(parts) == 2:
                 return parts[1].decode("ascii")
             else:
-                raise MissingClientTokenError("Invalid Authorization header.")
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Invalid Authorization header.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
         else:
             # Try to get the access_token from the query params.
             if not query_params:
-                raise MissingClientTokenError()
+                raise AuthError(
+                    token_not_found_http_status,
+                    "Missing access token.",
+                    errcode=Codes.MISSING_TOKEN,
+                )
 
             return query_params[0].decode("ascii")
 
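On the wire, the header style this function handles looks like ``Authorization: Bearer <token>``. A standalone sketch of just that branch (a simplified re-implementation for illustration, not the code above):

.. code-block:: python

   # Simplified version of the "Authorization: Bearer" branch; real requests
   # carry bytes, as the code in the hunk above shows.
   def extract_bearer_token(auth_header):
       parts = auth_header.split(b" ")
       if parts[0] == b"Bearer" and len(parts) == 2:
           return parts[1].decode("ascii")
       raise ValueError("Invalid Authorization header.")

   assert extract_bearer_token(b"Bearer MDAxabc123") == "MDAxabc123"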
@@ -139,22 +139,6 @@ class ConsentNotGivenError(SynapseError):
         return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
 
 
-class UserDeactivatedError(SynapseError):
-    """The error returned to the client when the user attempted to access an
-    authenticated endpoint, but the account has been deactivated.
-    """
-
-    def __init__(self, msg):
-        """Constructs a UserDeactivatedError
-
-        Args:
-            msg (str): The human-readable error message
-        """
-        super(UserDeactivatedError, self).__init__(
-            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN
-        )
-
-
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
 
@@ -226,9 +210,7 @@ class NotFoundError(SynapseError):
 
 
 class AuthError(SynapseError):
-    """An error raised when there was a problem authorising an event, and at various
-    other poorly-defined times.
-    """
+    """An error raised when there was a problem authorising an event."""
 
     def __init__(self, *args, **kwargs):
         if "errcode" not in kwargs:
@@ -236,41 +218,6 @@ class AuthError(SynapseError):
         super(AuthError, self).__init__(*args, **kwargs)
 
 
-class InvalidClientCredentialsError(SynapseError):
-    """An error raised when there was a problem with the authorisation credentials
-    in a client request.
-
-    https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens:
-
-    When credentials are required but missing or invalid, the HTTP call will
-    return with a status of 401 and the error code, M_MISSING_TOKEN or
-    M_UNKNOWN_TOKEN respectively.
-    """
-
-    def __init__(self, msg, errcode):
-        super().__init__(code=401, msg=msg, errcode=errcode)
-
-
-class MissingClientTokenError(InvalidClientCredentialsError):
-    """Raised when we couldn't find the access token in a request"""
-
-    def __init__(self, msg="Missing access token"):
-        super().__init__(msg=msg, errcode="M_MISSING_TOKEN")
-
-
-class InvalidClientTokenError(InvalidClientCredentialsError):
-    """Raised when we didn't understand the access token in a request"""
-
-    def __init__(self, msg="Unrecognised access token", soft_logout=False):
-        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
-        self._soft_logout = soft_logout
-
-    def error_dict(self):
-        d = super().error_dict()
-        d["soft_logout"] = self._soft_logout
-        return d
-
-
 class ResourceLimitError(SynapseError):
     """
     Any error raised when there is a problem with resource usage.
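For reference, the ``error_dict`` override being deleted here is what gives the 401 response its extra field. With the release-side classes, an expired token serialises to a body like the sketch below (reconstructed from the override above; illustrative):

.. code-block:: python

   # What the left-hand (release) InvalidClientTokenError produces for an
   # expired access token, going by the error_dict override shown above.
   expected_body = {
       "errcode": "M_UNKNOWN_TOKEN",
       "error": "Access token has expired",
       "soft_logout": True,
   }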
@@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory
 import synapse
 from synapse.app import check_bind_error
 from synapse.crypto import context_factory
-from synapse.logging.context import PreserveLoggingContext
+from synapse.util import PreserveLoggingContext
 from synapse.util.async_helpers import Linearizer
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
@@ -48,7 +48,7 @@ def register_sighup(func):
     _sighup_callbacks.append(func)
 
 
-def start_worker_reactor(appname, config, run_command=reactor.run):
+def start_worker_reactor(appname, config):
     """ Run the reactor in the main process
 
     Daemonizes if necessary, and then configures some resources, before starting
@@ -57,7 +57,6 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
     Args:
         appname (str): application name which will be sent to syslog
        config (synapse.config.Config): config object
-        run_command (Callable[]): callable that actually runs the reactor
     """
 
     logger = logging.getLogger(config.worker_app)
@@ -70,19 +69,11 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
         daemonize=config.worker_daemonize,
         print_pidfile=config.print_pidfile,
         logger=logger,
-        run_command=run_command,
     )
 
 
 def start_reactor(
-    appname,
-    soft_file_limit,
-    gc_thresholds,
-    pid_file,
-    daemonize,
-    print_pidfile,
-    logger,
-    run_command=reactor.run,
+    appname, soft_file_limit, gc_thresholds, pid_file, daemonize, print_pidfile, logger
 ):
     """ Run the reactor in the main process
 
@@ -97,42 +88,38 @@ def start_reactor(
         daemonize (bool): true to run the reactor in a background process
         print_pidfile (bool): whether to print the pid file, if daemonize is True
         logger (logging.Logger): logger instance to pass to Daemonize
-        run_command (Callable[]): callable that actually runs the reactor
     """
 
     install_dns_limiter(reactor)
 
     def run():
-        logger.info("Running")
-        change_resource_limit(soft_file_limit)
-        if gc_thresholds:
-            gc.set_threshold(*gc_thresholds)
-        run_command()
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+        with PreserveLoggingContext():
+            logger.info("Running")
+
+            change_resource_limit(soft_file_limit)
+            if gc_thresholds:
+                gc.set_threshold(*gc_thresholds)
+            reactor.run()
 
-    # make sure that we run the reactor with the sentinel log context,
-    # otherwise other PreserveLoggingContext instances will get confused
-    # and complain when they see the logcontext arbitrarily swapping
-    # between the sentinel and `run` logcontexts.
-    #
-    # We also need to drop the logcontext before forking if we're daemonizing,
-    # otherwise the cputime metrics get confused about the per-thread resource usage
-    # appearing to go backwards.
-    with PreserveLoggingContext():
-        if daemonize:
-            if print_pidfile:
-                print(pid_file)
-
-            daemon = Daemonize(
-                app=appname,
-                pid=pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-            daemon.start()
-        else:
-            run()
+    if daemonize:
+        if print_pidfile:
+            print(pid_file)
+
+        daemon = Daemonize(
+            app=appname,
+            pid=pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
 
 
 def quit_with_error(error_string):
@@ -149,7 +136,8 @@ def listen_metrics(bind_addresses, port):
     """
     Start Prometheus metrics server.
     """
-    from synapse.metrics import RegistryProxy, start_http_server
+    from synapse.metrics import RegistryProxy
+    from prometheus_client import start_http_server
 
     for host in bind_addresses:
         logger.info("Starting metrics listener on %s:%d", host, port)
@@ -252,9 +240,6 @@ def start(hs, listeners=None):
     # Load the certificate from disk.
     refresh_certificate(hs)
 
-    # Start the tracer
-    synapse.logging.opentracing.init_tracer(hs.config)
-
     # It is now safe to start your Synapse.
     hs.start_listening(listeners)
     hs.get_datastore().start_profiling()
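The ``run_command`` hook the release side threads through these functions exists so that a one-shot process can swap the default ``reactor.run`` for ``task.react``, which tears the reactor down once its deferred resolves. A minimal sketch of that pattern (the work inside ``run`` is a stand-in):

.. code-block:: python

   # Sketch of the task.react pattern that run_command enables; the removed
   # admin_cmd module in the next hunk uses it the same way.
   from twisted.internet import defer, task

   @defer.inlineCallbacks
   def run(_reactor):
       yield defer.succeed(None)  # the actual admin command would run here

   task.react(run)  # starts the reactor, then exits when run() finishes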
@@ -1,264 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2019 Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import argparse
-import logging
-import os
-import sys
-import tempfile
-
-from canonicaljson import json
-
-from twisted.internet import defer, task
-
-import synapse
-from synapse.app import _base
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.handlers.admin import ExfiltrationWriter
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
-from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
-from synapse.replication.slave.storage.presence import SlavedPresenceStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
-from synapse.util.logcontext import LoggingContext
-from synapse.util.versionstring import get_version_string
-
-logger = logging.getLogger("synapse.app.admin_cmd")
-
-
-class AdminCmdSlavedStore(
-    SlavedReceiptsStore,
-    SlavedAccountDataStore,
-    SlavedApplicationServiceStore,
-    SlavedRegistrationStore,
-    SlavedFilteringStore,
-    SlavedPresenceStore,
-    SlavedGroupServerStore,
-    SlavedDeviceInboxStore,
-    SlavedDeviceStore,
-    SlavedPushRuleStore,
-    SlavedEventStore,
-    SlavedClientIpStore,
-    RoomStore,
-    BaseSlavedStore,
-):
-    pass
-
-
-class AdminCmdServer(HomeServer):
-    DATASTORE_CLASS = AdminCmdSlavedStore
-
-    def _listen_http(self, listener_config):
-        pass
-
-    def start_listening(self, listeners):
-        pass
-
-    def build_tcp_replication(self):
-        return AdminCmdReplicationHandler(self)
-
-
-class AdminCmdReplicationHandler(ReplicationClientHandler):
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        pass
-
-    def get_streams_to_replicate(self):
-        return {}
-
-
-@defer.inlineCallbacks
-def export_data_command(hs, args):
-    """Export data for a user.
-
-    Args:
-        hs (HomeServer)
-        args (argparse.Namespace)
-    """
-
-    user_id = args.user_id
-    directory = args.output_directory
-
-    res = yield hs.get_handlers().admin_handler.export_user_data(
-        user_id, FileExfiltrationWriter(user_id, directory=directory)
-    )
-    print(res)
-
-
-class FileExfiltrationWriter(ExfiltrationWriter):
-    """An ExfiltrationWriter that writes the user's data to a directory.
-    Returns the directory location on completion.
-
-    Note: This writes to disk on the main reactor thread.
-
-    Args:
-        user_id (str): The user whose data is being exfiltrated.
-        directory (str|None): The directory to write the data to, if None then
-            will write to a temporary directory.
-    """
-
-    def __init__(self, user_id, directory=None):
-        self.user_id = user_id
-
-        if directory:
-            self.base_directory = directory
-        else:
-            self.base_directory = tempfile.mkdtemp(
-                prefix="synapse-exfiltrate__%s__" % (user_id,)
-            )
-
-        os.makedirs(self.base_directory, exist_ok=True)
-        if list(os.listdir(self.base_directory)):
-            raise Exception("Directory must be empty")
-
-    def write_events(self, room_id, events):
-        room_directory = os.path.join(self.base_directory, "rooms", room_id)
-        os.makedirs(room_directory, exist_ok=True)
-        events_file = os.path.join(room_directory, "events")
-
-        with open(events_file, "a") as f:
-            for event in events:
-                print(json.dumps(event.get_pdu_json()), file=f)
-
-    def write_state(self, room_id, event_id, state):
-        room_directory = os.path.join(self.base_directory, "rooms", room_id)
-        state_directory = os.path.join(room_directory, "state")
-        os.makedirs(state_directory, exist_ok=True)
-
-        event_file = os.path.join(state_directory, event_id)
-
-        with open(event_file, "a") as f:
-            for event in state.values():
-                print(json.dumps(event.get_pdu_json()), file=f)
-
-    def write_invite(self, room_id, event, state):
-        self.write_events(room_id, [event])
-
-        # We write the invite state somewhere else as they aren't full events
-        # and are only a subset of the state at the event.
-        room_directory = os.path.join(self.base_directory, "rooms", room_id)
-        os.makedirs(room_directory, exist_ok=True)
-
-        invite_state = os.path.join(room_directory, "invite_state")
-
-        with open(invite_state, "a") as f:
-            for event in state.values():
-                print(json.dumps(event), file=f)
-
-    def finished(self):
-        return self.base_directory
-
-
-def start(config_options):
-    parser = argparse.ArgumentParser(description="Synapse Admin Command")
-    HomeServerConfig.add_arguments_to_parser(parser)
-
-    subparser = parser.add_subparsers(
-        title="Admin Commands",
-        required=True,
-        dest="command",
-        metavar="<admin_command>",
-        help="The admin command to perform.",
-    )
-    export_data_parser = subparser.add_parser(
-        "export-data", help="Export all data for a user"
-    )
-    export_data_parser.add_argument("user_id", help="User to extract data from")
-    export_data_parser.add_argument(
-        "--output-directory",
-        action="store",
-        metavar="DIRECTORY",
-        required=False,
-        help="The directory to store the exported data in. Must be empty. Defaults"
-        " to creating a temp directory.",
-    )
-    export_data_parser.set_defaults(func=export_data_command)
-
-    try:
-        config, args = HomeServerConfig.load_config_with_parser(parser, config_options)
-    except ConfigError as e:
-        sys.stderr.write("\n" + str(e) + "\n")
-        sys.exit(1)
-
-    if config.worker_app is not None:
-        assert config.worker_app == "synapse.app.admin_cmd"
-
-    # Update the config with some basic overrides so that we don't have to
-    # specify a full worker config.
-    config.worker_app = "synapse.app.admin_cmd"
-
-    if (
-        not config.worker_daemonize
-        and not config.worker_log_file
-        and not config.worker_log_config
-    ):
-        # Since we're meant to be run as a "command" let's not redirect stdio
-        # unless we've actually set log config.
-        config.no_redirect_stdio = True
-
-    # Explicitly disable background processes
-    config.update_user_directory = False
-    config.start_pushers = False
-    config.send_federation = False
-
-    setup_logging(config, use_worker_options=True)
-
-    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
-
-    database_engine = create_engine(config.database_config)
-
-    ss = AdminCmdServer(
-        config.server_name,
-        db_config=config.database_config,
-        config=config,
-        version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
-    )
-
-    ss.setup()
-
-    # We use task.react as the basic run command as it correctly handles tearing
-    # down the reactor when the deferreds resolve and setting the return value.
-    # We also make sure that `_base.start` gets run before we actually run the
-    # command.
-
-    @defer.inlineCallbacks
-    def run(_reactor):
-        with LoggingContext("command"):
-            yield _base.start(ss, [])
-            yield args.func(ss, args)
-
-    _base.start_worker_reactor(
-        "synapse-admin-cmd", config, run_command=lambda: task.react(run)
-    )
-
-
-if __name__ == "__main__":
-    with LoggingContext("main"):
-        start(sys.argv[1:])
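Going by the argparse wiring in the removed module, its command line would have been invoked roughly as sketched below; the config path and exact flags are reconstructed from the code above, not taken from documentation:

.. code-block:: python

   # Illustrative invocation of the removed admin command, reconstructed from
   # the argparse definitions above; treat the details as assumptions.
   import subprocess

   subprocess.run(
       [
           "python", "-m", "synapse.app.admin_cmd",
           "-c", "homeserver.yaml",
           "export-data", "@user:example.com",
           "--output-directory", "/tmp/export",
       ],
       check=True,
   )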
@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics import RegistryProxy
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
@@ -36,6 +36,7 @@ from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 
@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics import RegistryProxy
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -64,6 +64,7 @@ from synapse.rest.client.versions import VersionsRestServlet
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 
@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics import RegistryProxy
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -59,6 +59,7 @@ from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.storage.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 
@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics import RegistryProxy
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -48,6 +48,7 @@ from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 
@@ -27,9 +27,9 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.federation import send_queue
 from synapse.http.site import SynapseSite
-from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
+from synapse.metrics import RegistryProxy
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
@@ -44,6 +44,7 @@ from synapse.storage.engines import create_engine
 from synapse.types import ReadReceipt
 from synapse.util.async_helpers import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 
@@ -29,8 +29,8 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha._base import client_patterns
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -54,9 +54,9 @@ from synapse.federation.transport.server import TransportLayerServer
|
|||||||
from synapse.http.additional_resource import AdditionalResource
|
from synapse.http.additional_resource import AdditionalResource
|
||||||
from synapse.http.server import RootRedirect
|
from synapse.http.server import RootRedirect
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.module_api import ModuleApi
|
from synapse.module_api import ModuleApi
|
||||||
from synapse.python_dependencies import check_requirements
|
from synapse.python_dependencies import check_requirements
|
||||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||||
@@ -72,6 +72,7 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
|
|||||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
||||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.module_loader import load_module
|
from synapse.util.module_loader import load_module
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
@@ -40,6 +40,7 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.media_repository import MediaRepositoryStore
|
from synapse.storage.media_repository import MediaRepositoryStore
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext, run_in_background
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import __func__
|
from synapse.replication.slave.storage._base import __func__
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
@@ -38,6 +38,7 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage import DataStore
|
from synapse.storage import DataStore
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext, run_in_background
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -31,8 +31,8 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext, run_in_background
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -57,6 +57,7 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.presence import UserPresenceState
|
from synapse.storage.presence import UserPresenceState
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext, run_in_background
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.stringutils import random_string
|
from synapse.util.stringutils import random_string
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.logging.context import LoggingContext, run_in_background
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
@@ -46,6 +46,7 @@ from synapse.storage.engines import create_engine
|
|||||||
from synapse.storage.user_directory import UserDirectoryStore
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext, run_in_background
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -53,8 +53,8 @@ import logging
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.appservice import ApplicationServiceState
|
from synapse.appservice import ApplicationServiceState
|
||||||
from synapse.logging.context import run_in_background
|
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
|
from synapse.util.logcontext import run_in_background
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -137,42 +137,12 @@ class Config(object):
|
|||||||
return file_stream.read()
|
return file_stream.read()
|
||||||
|
|
||||||
def invoke_all(self, name, *args, **kargs):
|
def invoke_all(self, name, *args, **kargs):
|
||||||
"""Invoke all instance methods with the given name and arguments in the
|
|
||||||
class's MRO.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
name (str): Name of function to invoke
|
|
||||||
*args
|
|
||||||
**kwargs
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
list: The list of the return values from each method called
|
|
||||||
"""
|
|
||||||
results = []
|
results = []
|
||||||
for cls in type(self).mro():
|
for cls in type(self).mro():
|
||||||
if name in cls.__dict__:
|
if name in cls.__dict__:
|
||||||
results.append(getattr(cls, name)(self, *args, **kargs))
|
results.append(getattr(cls, name)(self, *args, **kargs))
|
||||||
return results
|
return results
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def invoke_all_static(cls, name, *args, **kargs):
|
|
||||||
"""Invoke all static methods with the given name and arguments in the
|
|
||||||
class's MRO.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
name (str): Name of function to invoke
|
|
||||||
*args
|
|
||||||
**kwargs
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
list: The list of the return values from each method called
|
|
||||||
"""
|
|
||||||
results = []
|
|
||||||
for c in cls.mro():
|
|
||||||
if name in c.__dict__:
|
|
||||||
results.append(getattr(c, name)(*args, **kargs))
|
|
||||||
return results
|
|
||||||
|
|
||||||
def generate_config(
|
def generate_config(
|
||||||
self,
|
self,
|
||||||
config_dir_path,
|
config_dir_path,
|
||||||
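
Both `invoke_all` (kept on the head branch) and the removed `invoke_all_static` dispatch by walking the class's method resolution order, so every config mixin that defines a method of the given name gets called once. A minimal standalone sketch of that dispatch pattern; the `ServerPart`/`DatabasePart` classes below are hypothetical stand-ins, not Synapse's:

class RootConfig:
    def invoke_all(self, name, *args, **kwargs):
        # Call `name` once for every class in the MRO that defines it,
        # collecting the return values in MRO order.
        results = []
        for cls in type(self).mro():
            if name in cls.__dict__:
                results.append(getattr(cls, name)(self, *args, **kwargs))
        return results

class ServerPart(RootConfig):
    def read_config(self, config):
        return "server"

class DatabasePart(ServerPart):
    def read_config(self, config):
        return "database"

# Both definitions of read_config run, most-derived class first:
assert DatabasePart().invoke_all("read_config", {}) == ["database", "server"]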
@@ -232,23 +202,6 @@ class Config(object):
         Returns: Config object.
         """
         config_parser = argparse.ArgumentParser(description=description)
-        cls.add_arguments_to_parser(config_parser)
-        obj, _ = cls.load_config_with_parser(config_parser, argv)
-
-        return obj
-
-    @classmethod
-    def add_arguments_to_parser(cls, config_parser):
-        """Adds all the config flags to an ArgumentParser.
-
-        Doesn't support config-file-generation: used by the worker apps.
-
-        Used for workers where we want to add extra flags/subcommands.
-
-        Args:
-            config_parser (ArgumentParser): App description
-        """
-
         config_parser.add_argument(
             "-c",
             "--config-path",
@@ -266,34 +219,16 @@ class Config(object):
             " Defaults to the directory containing the last config file",
         )
 
-        cls.invoke_all_static("add_arguments", config_parser)
-
-    @classmethod
-    def load_config_with_parser(cls, parser, argv):
-        """Parse the commandline and config files with the given parser
-
-        Doesn't support config-file-generation: used by the worker apps.
-
-        Used for workers where we want to add extra flags/subcommands.
-
-        Args:
-            parser (ArgumentParser)
-            argv (list[str])
-
-        Returns:
-            tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed
-                config object and the parsed argparse.Namespace object from
-                `parser.parse_args(..)`
-        """
-
         obj = cls()
 
-        config_args = parser.parse_args(argv)
+        obj.invoke_all("add_arguments", config_parser)
+
+        config_args = config_parser.parse_args(argv)
 
         config_files = find_config_files(search_paths=config_args.config_path)
 
         if not config_files:
-            parser.error("Must supply a config file.")
+            config_parser.error("Must supply a config file.")
 
         if config_args.keys_directory:
             config_dir_path = config_args.keys_directory
@@ -309,7 +244,7 @@ class Config(object):
 
         obj.invoke_all("read_arguments", config_args)
 
-        return obj, config_args
+        return obj
 
     @classmethod
     def load_or_generate_config(cls, description, argv):
@@ -466,7 +401,7 @@ class Config(object):
             formatter_class=argparse.RawDescriptionHelpFormatter,
         )
 
-        obj.invoke_all_static("add_arguments", parser)
+        obj.invoke_all("add_arguments", parser)
         args = parser.parse_args(remaining_args)
 
         config_dict = read_config_files(config_files)

@@ -69,8 +69,7 @@ class DatabaseConfig(Config):
         if database_path is not None:
             self.database_config["args"]["database"] = database_path
 
-    @staticmethod
-    def add_arguments(parser):
+    def add_arguments(self, parser):
         db_group = parser.add_argument_group("database")
         db_group.add_argument(
             "-d",
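
The base branch turns each config class's `add_arguments` into a `@staticmethod` so it can be invoked via `invoke_all_static` before any config object exists; the head branch calls it through `invoke_all` on an instance instead. A minimal sketch of the static variant, using only the standard `argparse` module (the `MyConfig` class is hypothetical):

import argparse

class MyConfig:
    @staticmethod
    def add_arguments(parser):
        # No instance state is needed just to declare the flags.
        group = parser.add_argument_group("database")
        group.add_argument("-d", "--database-path", metavar="SQLITE_DATABASE_PATH")

parser = argparse.ArgumentParser(description="demo")
MyConfig.add_arguments(parser)  # works without constructing MyConfig
args = parser.parse_args(["-d", "/tmp/homeserver.db"])
print(args.database_path)  # /tmp/homeserver.db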
@@ -112,17 +112,13 @@ class EmailConfig(Config):
             missing = []
             for k in required:
                 if k not in email_config:
-                    missing.append("email." + k)
-
-            if config.get("public_baseurl") is None:
-                missing.append("public_base_url")
+                    missing.append(k)
 
             if len(missing) > 0:
                 raise RuntimeError(
-                    "Password resets emails are configured to be sent from "
-                    "this homeserver due to a partial 'email' block. "
-                    "However, the following required keys are missing: %s"
-                    % (", ".join(missing),)
+                    "email.password_reset_behaviour is set to 'local' "
+                    "but required keys are missing: %s"
+                    % (", ".join(["email." + k for k in missing]),)
                 )
 
             # Templates for password reset emails
@@ -160,6 +156,13 @@ class EmailConfig(Config):
                 filepath, "email.password_reset_template_success_html"
             )
 
+            if config.get("public_baseurl") is None:
+                raise RuntimeError(
+                    "email.password_reset_behaviour is set to 'local' but no "
+                    "public_baseurl is set. This is necessary to generate password "
+                    "reset links"
+                )
+
             if self.email_enable_notifs:
                 required = [
                     "smtp_host",
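
Both sides validate the `email` block the same way: collect every missing required key first, then fail once with a single message listing them all, so the operator sees every problem at once. A standalone sketch of that pattern; the key names in `required` are an illustrative subset, not the full list:

def check_email_config(config):
    email_config = config.get("email", {})
    required = ["smtp_host", "smtp_port", "notif_from"]  # illustrative subset

    # Gather all problems before raising.
    missing = ["email." + k for k in required if k not in email_config]
    if config.get("public_baseurl") is None:
        missing.append("public_baseurl")

    if missing:
        raise RuntimeError("Required keys are missing: %s" % ", ".join(missing))

try:
    check_email_config({"email": {"smtp_host": "localhost"}})
except RuntimeError as err:
    print(err)  # Required keys are missing: email.smtp_port, email.notif_from, public_baseurl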
@@ -40,7 +40,6 @@ from .spam_checker import SpamCheckerConfig
 from .stats import StatsConfig
 from .third_party_event_rules import ThirdPartyRulesConfig
 from .tls import TlsConfig
-from .tracer import TracerConfig
 from .user_directory import UserDirectoryConfig
 from .voip import VoipConfig
 from .workers import WorkerConfig
@@ -76,6 +75,5 @@ class HomeServerConfig(
     ServerNoticesConfig,
     RoomDirectoryConfig,
     ThirdPartyRulesConfig,
-    TracerConfig,
 ):
     pass

@@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner
 
 import synapse
 from synapse.app import _base as appbase
-from synapse.logging.context import LoggingContextFilter
+from synapse.util.logcontext import LoggingContextFilter
 from synapse.util.versionstring import get_version_string
 
 from ._base import Config
@@ -40,7 +40,7 @@ formatters:
 
 filters:
     context:
-        (): synapse.logging.context.LoggingContextFilter
+        (): synapse.util.logcontext.LoggingContextFilter
         request: ""
 
 handlers:
@@ -103,8 +103,7 @@ class LoggingConfig(Config):
         if args.log_file is not None:
             self.log_file = args.log_file
 
-    @staticmethod
-    def add_arguments(parser):
+    def add_arguments(cls, parser):
         logging_group = parser.add_argument_group("logging")
         logging_group.add_argument(
             "-v",

@@ -23,7 +23,7 @@ class RateLimitConfig(object):
 
 class FederationRateLimitConfig(object):
     _items_and_default = {
-        "window_size": 1000,
+        "window_size": 10000,
         "sleep_limit": 10,
         "sleep_delay": 500,
         "reject_limit": 50,
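
`FederationRateLimitConfig` keeps its defaults in the class-level `_items_and_default` dict, so `FederationRateLimitConfig(**config["rc_federation"])` only needs the keys the operator actually set. The constructor itself is not shown in this hunk; the kwargs-merge below is an assumed sketch of how such a defaults dict is typically consumed, not Synapse's exact code:

class FederationRateLimitConfig:
    _items_and_default = {
        "window_size": 10000,
        "sleep_limit": 10,
        "sleep_delay": 500,
        "reject_limit": 50,
    }

    def __init__(self, **kwargs):
        # Each setting falls back to the class default when not supplied.
        for name, default in self._items_and_default.items():
            setattr(self, name, kwargs.get(name, default))

limits = FederationRateLimitConfig(window_size=1000)
print(limits.window_size, limits.sleep_limit)  # 1000 10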
@@ -54,7 +54,7 @@ class RatelimitConfig(Config):
 
         # Load the new-style federation config, if it exists. Otherwise, fall
         # back to the old method.
-        if "rc_federation" in config:
+        if "federation_rc" in config:
             self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
         else:
             self.rc_federation = FederationRateLimitConfig(

@@ -71,8 +71,9 @@ class RegistrationConfig(Config):
         self.default_identity_server = config.get("default_identity_server")
         self.allow_guest_access = config.get("allow_guest_access", False)
 
-        if config.get("invite_3pid_guest", False):
-            raise ConfigError("invite_3pid_guest is no longer supported")
+        self.invite_3pid_guest = self.allow_guest_access and config.get(
+            "invite_3pid_guest", False
+        )
 
         self.auto_join_rooms = config.get("auto_join_rooms", [])
         for room_alias in self.auto_join_rooms:
@@ -84,11 +85,6 @@ class RegistrationConfig(Config):
             "disable_msisdn_registration", False
         )
 
-        session_lifetime = config.get("session_lifetime")
-        if session_lifetime is not None:
-            session_lifetime = self.parse_duration(session_lifetime)
-        self.session_lifetime = session_lifetime
-
     def generate_config_section(self, generate_secrets=False, **kwargs):
         if generate_secrets:
             registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -146,17 +142,6 @@ class RegistrationConfig(Config):
         #  renew_at: 1w
         #  renew_email_subject: "Renew your %%(app)s account"
 
-        # Time that a user's session remains valid for, after they log in.
-        #
-        # Note that this is not currently compatible with guest logins.
-        #
-        # Note also that this is calculated at login time: changes are not applied
-        # retrospectively to users who have already logged in.
-        #
-        # By default, this is infinite.
-        #
-        #session_lifetime: 24h
-
         # The user must provide all of the below types of 3PID when registering.
         #
         #registrations_require_3pid:
@@ -237,8 +222,7 @@ class RegistrationConfig(Config):
             % locals()
         )
 
-    @staticmethod
-    def add_arguments(parser):
+    def add_arguments(self, parser):
         reg_group = parser.add_argument_group("registration")
         reg_group.add_argument(
             "--enable-registration",

@@ -136,7 +136,7 @@ class ServerConfig(Config):
 
         # Whether to enable experimental MSC1849 (aka relations) support
         self.experimental_msc1849_support_enabled = config.get(
-            "experimental_msc1849_support_enabled", True
+            "experimental_msc1849_support_enabled", False
         )
 
         # Options to control access by tracking MAU
@@ -639,8 +639,7 @@ class ServerConfig(Config):
         if args.print_pidfile is not None:
             self.print_pidfile = args.print_pidfile
 
-    @staticmethod
-    def add_arguments(parser):
+    def add_arguments(self, parser):
         server_group = parser.add_argument_group("server")
         server_group.add_argument(
             "-D",

@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Matrix.org Foundation C.I.C.d
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ._base import Config, ConfigError
-
-
-class TracerConfig(Config):
-    def read_config(self, config, **kwargs):
-        opentracing_config = config.get("opentracing")
-        if opentracing_config is None:
-            opentracing_config = {}
-
-        self.opentracer_enabled = opentracing_config.get("enabled", False)
-        if not self.opentracer_enabled:
-            return
-
-        # The tracer is enabled so sanitize the config
-
-        self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
-        if not isinstance(self.opentracer_whitelist, list):
-            raise ConfigError("Tracer homeserver_whitelist config is malformed")
-
-    def generate_config_section(cls, **kwargs):
-        return """\
-        ## Opentracing ##
-
-        # These settings enable opentracing, which implements distributed tracing.
-        # This allows you to observe the causal chains of events across servers
-        # including requests, key lookups etc., across any server running
-        # synapse or any other other services which supports opentracing
-        # (specifically those implemented with Jaeger).
-        #
-        opentracing:
-            # tracing is disabled by default. Uncomment the following line to enable it.
-            #
-            #enabled: true
-
-            # The list of homeservers we wish to send and receive span contexts and span baggage.
-            # See docs/opentracing.rst
-            # This is a list of regexes which are matched against the server_name of the
-            # homeserver.
-            #
-            # By defult, it is empty, so no servers are matched.
-            #
-            #homeserver_whitelist:
-            #  - ".*"
-        """
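
The deleted `TracerConfig` reads the `opentracing` section, defaults `enabled` to off, and validates that `homeserver_whitelist` is a list of regexes to match against server names. A small sketch of how that whitelist would be applied to an origin server name; the `matches_whitelist` helper is illustrative, not Synapse's API:

import re

def matches_whitelist(server_name, homeserver_whitelist):
    # Each whitelist entry is a regex matched against the server_name.
    return any(re.match(pattern, server_name) for pattern in homeserver_whitelist)

config = {"opentracing": {"enabled": True, "homeserver_whitelist": [r".*\.example\.com"]}}
whitelist = config["opentracing"].get("homeserver_whitelist", [])
print(matches_whitelist("hs1.example.com", whitelist))  # True
print(matches_whitelist("other.org", whitelist))        # False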
@@ -44,16 +44,15 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
-from synapse.logging.context import (
+from synapse.storage.keys import FetchKeyResult
+from synapse.util import logcontext, unwrapFirstError
+from synapse.util.async_helpers import yieldable_gather_results
+from synapse.util.logcontext import (
     LoggingContext,
     PreserveLoggingContext,
-    make_deferred_yieldable,
     preserve_fn,
     run_in_background,
 )
-from synapse.storage.keys import FetchKeyResult
-from synapse.util import unwrapFirstError
-from synapse.util.async_helpers import yieldable_gather_results
 from synapse.util.metrics import Measure
 from synapse.util.retryutils import NotRetryingDestination
 
@@ -141,7 +140,7 @@ class Keyring(object):
         """
         req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
         requests = (req,)
-        return make_deferred_yieldable(self._verify_objects(requests)[0])
+        return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
 
     def verify_json_objects_for_server(self, server_and_json):
         """Bulk verifies signatures of json objects, bulk fetching keys as
@@ -558,7 +557,7 @@ class BaseV2KeyFetcher(object):
 
         signed_key_json_bytes = encode_canonical_json(signed_key_json)
 
-        yield make_deferred_yieldable(
+        yield logcontext.make_deferred_yieldable(
             defer.gatherResults(
                 [
                     run_in_background(
@@ -613,7 +612,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
 
             defer.returnValue({})
 
-        results = yield make_deferred_yieldable(
+        results = yield logcontext.make_deferred_yieldable(
             defer.gatherResults(
                 [run_in_background(get_key, server) for server in self.key_servers],
                 consumeErrors=True,

@@ -104,17 +104,6 @@ class _EventInternalMetadata(object):
         """
         return getattr(self, "proactively_send", True)
 
-    def is_redacted(self):
-        """Whether the event has been redacted.
-
-        This is used for efficiently checking whether an event has been
-        marked as redacted without needing to make another database call.
-
-        Returns:
-            bool
-        """
-        return getattr(self, "redacted", False)
-
 
 def _event_dict_property(key):
     # We want to be able to use hasattr with the event dict properties.

@@ -19,7 +19,7 @@ from frozendict import frozendict
 
 from twisted.internet import defer
 
-from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 
 
 class EventContext(object):

@@ -52,15 +52,10 @@ def prune_event(event):
 
     from . import event_type_from_format_version
 
-    pruned_event = event_type_from_format_version(event.format_version)(
+    return event_type_from_format_version(event.format_version)(
         pruned_event_dict, event.internal_metadata.get_dict()
     )
 
-    # Mark the event as redacted
-    pruned_event.internal_metadata.redacted = True
-
-    return pruned_event
-
 
 def prune_event_dict(event_dict):
     """Redacts the event_dict in the same way as `prune_event`, except it
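
On the base branch `prune_event` sets `internal_metadata.redacted = True` on the pruned copy, and `is_redacted()` reads that flag back with `getattr`, so callers can test for redaction without another database hit. A standalone sketch of that flag-on-metadata pattern, with simplified stand-in classes rather than Synapse's event types:

class InternalMetadata:
    def is_redacted(self):
        # getattr with a default means the attribute only has to exist
        # on events that were actually pruned.
        return getattr(self, "redacted", False)

class Event:
    def __init__(self):
        self.internal_metadata = InternalMetadata()

def prune(event):
    pruned = Event()  # stand-in for rebuilding the event from a pruned dict
    pruned.internal_metadata.redacted = True
    return pruned

original = Event()
print(original.internal_metadata.is_redacted())         # False
print(prune(original).internal_metadata.is_redacted())  # True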
@@ -365,12 +360,9 @@ class EventClientSerializer(object):
         event_id = event.event_id
         serialized_event = serialize_event(event, time_now, **kwargs)
 
-        # If MSC1849 is enabled then we need to look if there are any relations
-        # we need to bundle in with the event.
-        # Do not bundle relations if the event has been redacted
-        if not event.internal_metadata.is_redacted() and (
-            self.experimental_msc1849_support_enabled and bundle_aggregations
-        ):
+        # If MSC1849 is enabled then we need to look if thre are any relations
+        # we need to bundle in with the event
+        if self.experimental_msc1849_support_enabled and bundle_aggregations:
             annotations = yield self.store.get_aggregation_groups_for_event(event_id)
             references = yield self.store.get_relations_for_event(
                 event_id, RelationTypes.REFERENCE, direction="f"
@@ -400,11 +392,7 @@ class EventClientSerializer(object):
             serialized_event["content"].pop("m.relates_to", None)
 
             r = serialized_event["unsigned"].setdefault("m.relations", {})
-            r[RelationTypes.REPLACE] = {
-                "event_id": edit.event_id,
-                "origin_server_ts": edit.origin_server_ts,
-                "sender": edit.sender,
-            }
+            r[RelationTypes.REPLACE] = {"event_id": edit.event_id}
 
         defer.returnValue(serialized_event)
 
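
Both sides stash the bundled edit under `unsigned["m.relations"]`; the base branch additionally exposes the edit's `origin_server_ts` and `sender`. A sketch of the resulting JSON shape for a replaced event, with illustrative field values and `"m.replace"` written out in place of `RelationTypes.REPLACE`:

serialized_event = {
    "event_id": "$original",
    "content": {"body": "first draft", "msgtype": "m.text"},
    "unsigned": {},
}

edit = {"event_id": "$edit", "origin_server_ts": 1560000000000, "sender": "@alice:example.com"}

# Base-branch shape: expose when and by whom the replacement happened.
r = serialized_event["unsigned"].setdefault("m.relations", {})
r["m.replace"] = {
    "event_id": edit["event_id"],
    "origin_server_ts": edit["origin_server_ts"],
    "sender": edit["sender"],
}
print(serialized_event["unsigned"]["m.relations"]["m.replace"]["event_id"])  # $edit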
@@ -27,14 +27,8 @@ from synapse.crypto.event_signing import check_event_content_hash
 from synapse.events import event_type_from_format_version
 from synapse.events.utils import prune_event
 from synapse.http.servlet import assert_params_in_dict
-from synapse.logging.context import (
-    LoggingContext,
-    PreserveLoggingContext,
-    make_deferred_yieldable,
-    preserve_fn,
-)
 from synapse.types import get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util import logcontext, unwrapFirstError
 
 logger = logging.getLogger(__name__)
 
@@ -79,7 +73,7 @@ class FederationBase(object):
         @defer.inlineCallbacks
         def handle_check_result(pdu, deferred):
             try:
-                res = yield make_deferred_yieldable(deferred)
+                res = yield logcontext.make_deferred_yieldable(deferred)
             except SynapseError:
                 res = None
 
@@ -108,10 +102,10 @@ class FederationBase(object):
 
             defer.returnValue(res)
 
-        handle = preserve_fn(handle_check_result)
+        handle = logcontext.preserve_fn(handle_check_result)
         deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
 
-        valid_pdus = yield make_deferred_yieldable(
+        valid_pdus = yield logcontext.make_deferred_yieldable(
             defer.gatherResults(deferreds2, consumeErrors=True)
         ).addErrback(unwrapFirstError)
 
@@ -121,7 +115,7 @@ class FederationBase(object):
         defer.returnValue([p for p in valid_pdus if p])
 
     def _check_sigs_and_hash(self, room_version, pdu):
-        return make_deferred_yieldable(
+        return logcontext.make_deferred_yieldable(
             self._check_sigs_and_hashes(room_version, [pdu])[0]
         )
 
@@ -139,14 +133,14 @@ class FederationBase(object):
           * returns a redacted version of the event (if the signature
             matched but the hash did not)
           * throws a SynapseError if the signature check failed.
-        The deferreds run their callbacks in the sentinel
+        The deferreds run their callbacks in the sentinel logcontext.
         """
         deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
 
-        ctx = LoggingContext.current_context()
+        ctx = logcontext.LoggingContext.current_context()
 
         def callback(_, pdu):
-            with PreserveLoggingContext(ctx):
+            with logcontext.PreserveLoggingContext(ctx):
                 if not check_event_content_hash(pdu):
                     # let's try to distinguish between failures because the event was
                     # redacted (which are somewhat expected) vs actual ball-tampering
@@ -184,7 +178,7 @@ class FederationBase(object):
 
         def errback(failure, pdu):
             failure.trap(SynapseError)
-            with PreserveLoggingContext(ctx):
+            with logcontext.PreserveLoggingContext(ctx):
                 logger.warn(
                     "Signature check failed for %s: %s",
                     pdu.event_id,
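
The recurring edit across these hunks is purely an import-path move: `make_deferred_yieldable`, `preserve_fn` and friends live in `synapse.logging.context` on the base branch and in `synapse.util.logcontext` on the head branch, while the call pattern itself is unchanged. That pattern is: kick off each piece of work in the background, gather the resulting deferreds, and yield on the batch as a whole. A simplified sketch of the same shape in plain Twisted, with the logcontext bookkeeping deliberately elided:

from twisted.internet import defer, task

@defer.inlineCallbacks
def check_all(pdus, check_one):
    # Start every check concurrently, then wait for the whole batch;
    # consumeErrors=True ensures one failure doesn't leave others unhandled.
    deferreds = [defer.maybeDeferred(check_one, pdu) for pdu in pdus]
    results = yield defer.gatherResults(deferreds, consumeErrors=True)
    defer.returnValue([r for r in results if r is not None])

def main(reactor):
    d = check_all(["a", "b"], lambda pdu: pdu.upper())
    d.addCallback(print)  # ['A', 'B']
    return d

task.react(main)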
@@ -39,10 +39,10 @@ from synapse.api.room_versions import (
 )
 from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.logging.utils import log_function
-from synapse.util import unwrapFirstError
+from synapse.util import logcontext, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
+from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
@@ -207,7 +207,7 @@ class FederationClient(FederationBase):
         ]
 
         # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = yield make_deferred_yieldable(
+        pdus[:] = yield logcontext.make_deferred_yieldable(
             defer.gatherResults(
                 self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
             ).addErrback(unwrapFirstError)

@@ -42,8 +42,6 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
 from synapse.http.endpoint import parse_server_name
-from synapse.logging.context import nested_logging_context
-from synapse.logging.utils import log_function
 from synapse.replication.http.federation import (
     ReplicationFederationSendEduRestServlet,
     ReplicationGetQueryRestServlet,
@@ -52,6 +50,8 @@ from synapse.types import get_domain_from_id
 from synapse.util import glob_to_regex
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.logcontext import nested_logging_context
+from synapse.util.logutils import log_function
 
 # when processing incoming transactions, we try to handle multiple rooms in
 # parallel, up to this limit.
@@ -369,7 +369,7 @@ class FederationServer(FederationBase):
             logger.warn("Room version %s not in %s", room_version, supported_versions)
             raise IncompatibleRoomVersionError(room_version=room_version)
 
-        pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
+        pdu = yield self.handler.on_make_join_request(room_id, user_id)
         time_now = self._clock.time_msec()
         defer.returnValue(
             {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
@@ -423,7 +423,7 @@ class FederationServer(FederationBase):
     def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)
-        pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
+        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
 
         room_version = yield self.store.get_room_version(room_id)
 

@@ -21,7 +21,9 @@ These actions are mostly only used by the :py:mod:`.replication` module.
 
 import logging
 
-from synapse.logging.utils import log_function
+from twisted.internet import defer
+
+from synapse.util.logutils import log_function
 
 logger = logging.getLogger(__name__)
 
@@ -61,3 +63,33 @@ class TransactionActions(object):
         return self.store.set_received_txn_response(
             transaction.transaction_id, origin, code, response
         )
+
+    @defer.inlineCallbacks
+    @log_function
+    def prepare_to_send(self, transaction):
+        """ Persists the `Transaction` we are about to send and works out the
+        correct value for the `prev_ids` key.
+
+        Returns:
+            Deferred
+        """
+        transaction.prev_ids = yield self.store.prep_send_transaction(
+            transaction.transaction_id,
+            transaction.destination,
+            transaction.origin_server_ts,
+        )
+
+    @log_function
+    def delivered(self, transaction, response_code, response_dict):
+        """ Marks the given `Transaction` as having been successfully
+        delivered to the remote homeserver, and what the response was.
+
+        Returns:
+            Deferred
+        """
+        return self.store.delivered_txn(
+            transaction.transaction_id,
+            transaction.destination,
+            response_code,
+            response_dict,
+        )
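
The head branch reintroduces a persist-then-send-then-record lifecycle for outgoing transactions: `prepare_to_send` stores the transaction before it goes on the wire, and `delivered` records the response code afterwards, which is exactly how the `TransactionManager` hunks below drive it. A hypothetical, simplified sketch of a sender using those two hooks, with synchronous stand-ins for the Deferred-returning store calls:

class TransactionLifecycle:
    def __init__(self, store):
        self.store = store

    def send(self, transaction, transport):
        # Persist first, so an interrupted send can be detected on restart.
        self.store.prep_send_transaction(transaction)
        code, response = transport.send(transaction)
        # Record the outcome whether or not the remote accepted it.
        self.store.delivered_txn(transaction, code, response)
        return code == 200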
@@ -26,11 +26,6 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue
 from synapse.federation.sender.transaction_manager import TransactionManager
 from synapse.federation.units import Edu
 from synapse.handlers.presence import get_interested_remotes
-from synapse.logging.context import (
-    make_deferred_yieldable,
-    preserve_fn,
-    run_in_background,
-)
 from synapse.metrics import (
     LaterGauge,
     event_processing_loop_counter,
@@ -38,6 +33,7 @@ from synapse.metrics import (
     events_processed_counter,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import logcontext
 from synapse.util.metrics import measure_func
 
 logger = logging.getLogger(__name__)
@@ -214,10 +210,10 @@ class FederationSender(object):
                 for event in events:
                     events_by_room.setdefault(event.room_id, []).append(event)
 
-                yield make_deferred_yieldable(
+                yield logcontext.make_deferred_yieldable(
                     defer.gatherResults(
                         [
-                            run_in_background(handle_room_events, evs)
+                            logcontext.run_in_background(handle_room_events, evs)
                             for evs in itervalues(events_by_room)
                         ],
                         consumeErrors=True,
@@ -364,7 +360,7 @@ class FederationSender(object):
         for queue in queues:
             queue.flush_read_receipts_for_room(room_id)
 
-    @preserve_fn  # the caller should not yield on this
+    @logcontext.preserve_fn  # the caller should not yield on this
     @defer.inlineCallbacks
     def send_presence(self, states):
         """Send the new presence states to the appropriate destinations.

@@ -63,6 +63,8 @@ class TransactionManager(object):
             len(edus),
         )
 
+        logger.debug("TX [%s] Persisting transaction...", destination)
+
         transaction = Transaction.create_new(
             origin_server_ts=int(self.clock.time_msec()),
             transaction_id=txn_id,
@@ -74,6 +76,9 @@ class TransactionManager(object):
 
         self._next_txn_id += 1
 
+        yield self._transaction_actions.prepare_to_send(transaction)
+
+        logger.debug("TX [%s] Persisted transaction", destination)
         logger.info(
             "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
             destination,
@@ -113,6 +118,10 @@ class TransactionManager(object):
 
         logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
 
+        yield self._transaction_actions.delivered(transaction, code, response)
+
+        logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
+
         if code == 200:
             for e_id, r in response.get("pdus", {}).items():
                 if "error" in r:

@@ -22,7 +22,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import Membership
 from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
-from synapse.logging.utils import log_function
+from synapse.util.logutils import log_function
 
 logger = logging.getLogger(__name__)
 
(File diff suppressed because it is too large)
@@ -43,9 +43,9 @@ from signedjson.sign import sign_json
 from twisted.internet import defer
 
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
-from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
+from synapse.util.logcontext import run_in_background
 
 logger = logging.getLogger(__name__)
 

@@ -22,10 +22,9 @@ from email.mime.text import MIMEText
 from twisted.internet import defer
 
 from synapse.api.errors import StoreError
-from synapse.logging.context import make_deferred_yieldable
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID
 from synapse.util import stringutils
+from synapse.util.logcontext import make_deferred_yieldable
 
 try:
     from synapse.push.mailer import load_jinja2_templates
@@ -68,14 +67,7 @@ class AccountValidityHandler(object):
             )
 
             # Check the renewal emails to send and send them every 30min.
-            def send_emails():
-                # run as a background process to make sure that the database transactions
-                # have a logcontext to report to
-                return run_as_background_process(
-                    "send_renewals", self.send_renewal_emails
-                )
-
-            self.clock.looping_call(send_emails, 30 * 60 * 1000)
+            self.clock.looping_call(self.send_renewal_emails, 30 * 60 * 1000)
 
     @defer.inlineCallbacks
     def send_renewal_emails(self):
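
The base branch wraps the periodic job in `run_as_background_process` so each run gets its own logcontext for its database transactions, rather than handing `looping_call` the bare method. A standalone Twisted sketch of that wrapper pattern; `run_as_background_process_stub` and the other names are illustrative stand-ins for Synapse's helpers:

from twisted.internet import task
from twisted.internet.task import Clock

def run_as_background_process_stub(desc, func):
    # Stand-in for Synapse's helper, which would also set up a fresh
    # logcontext so the work has somewhere to report its activity.
    print("background process:", desc)
    return func()

def start_renewal_loop(clock, send_renewal_emails):
    def send_emails():
        # Wrap every tick so the work runs under a tracked context.
        return run_as_background_process_stub("send_renewals", send_renewal_emails)

    loop = task.LoopingCall(send_emails)
    loop.clock = clock
    loop.start(30 * 60)  # every 30 minutes; fires once immediately
    return loop

start_renewal_loop(Clock(), lambda: print("sending renewal emails"))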
|
|||||||
@@ -17,10 +17,6 @@ import logging

 from twisted.internet import defer

-from synapse.api.constants import Membership
-from synapse.types import RoomStreamToken
-from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler

 logger = logging.getLogger(__name__)

@@ -93,182 +89,3 @@ class AdminHandler(BaseHandler):
 ret = yield self.store.search_users(term)

 defer.returnValue(ret)
-
-@defer.inlineCallbacks
-def export_user_data(self, user_id, writer):
-"""Write all data we have on the user to the given writer.
-
-Args:
-user_id (str)
-writer (ExfiltrationWriter)
-
-Returns:
-defer.Deferred: Resolves when all data for a user has been written.
-The returned value is that returned by `writer.finished()`.
-"""
-# Get all rooms the user is in or has been in
-rooms = yield self.store.get_rooms_for_user_where_membership_is(
-user_id,
-membership_list=(
-Membership.JOIN,
-Membership.LEAVE,
-Membership.BAN,
-Membership.INVITE,
-),
-)
-
-# We only try and fetch events for rooms the user has been in. If
-# they've been e.g. invited to a room without joining then we handle
-# those seperately.
-rooms_user_has_been_in = yield self.store.get_rooms_user_has_been_in(user_id)
-
-for index, room in enumerate(rooms):
-room_id = room.room_id
-
-logger.info(
-"[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms)
-)
-
-forgotten = yield self.store.did_forget(user_id, room_id)
-if forgotten:
-logger.info("[%s] User forgot room %d, ignoring", user_id, room_id)
-continue
-
-if room_id not in rooms_user_has_been_in:
-# If we haven't been in the rooms then the filtering code below
-# won't return anything, so we need to handle these cases
-# explicitly.
-
-if room.membership == Membership.INVITE:
-event_id = room.event_id
-invite = yield self.store.get_event(event_id, allow_none=True)
-if invite:
-invited_state = invite.unsigned["invite_room_state"]
-writer.write_invite(room_id, invite, invited_state)
-
-continue
-
-# We only want to bother fetching events up to the last time they
-# were joined. We estimate that point by looking at the
-# stream_ordering of the last membership if it wasn't a join.
-if room.membership == Membership.JOIN:
-stream_ordering = yield self.store.get_room_max_stream_ordering()
-else:
-stream_ordering = room.stream_ordering
-
-from_key = str(RoomStreamToken(0, 0))
-to_key = str(RoomStreamToken(None, stream_ordering))
-
-written_events = set()  # Events that we've processed in this room
-
-# We need to track gaps in the events stream so that we can then
-# write out the state at those events. We do this by keeping track
-# of events whose prev events we haven't seen.
-
-# Map from event ID to prev events that haven't been processed,
-# dict[str, set[str]].
-event_to_unseen_prevs = {}
-
-# The reverse mapping to above, i.e. map from unseen event to events
-# that have the unseen event in their prev_events, i.e. the unseen
-# events "children". dict[str, set[str]]
-unseen_to_child_events = {}
-
-# We fetch events in the room the user could see by fetching *all*
-# events that we have and then filtering, this isn't the most
-# efficient method perhaps but it does guarantee we get everything.
-while True:
-events, _ = yield self.store.paginate_room_events(
-room_id, from_key, to_key, limit=100, direction="f"
-)
-if not events:
-break
-
-from_key = events[-1].internal_metadata.after
-
-events = yield filter_events_for_client(self.store, user_id, events)
-
-writer.write_events(room_id, events)
-
-# Update the extremity tracking dicts
-for event in events:
-# Check if we have any prev events that haven't been
-# processed yet, and add those to the appropriate dicts.
-unseen_events = set(event.prev_event_ids()) - written_events
-if unseen_events:
-event_to_unseen_prevs[event.event_id] = unseen_events
-for unseen in unseen_events:
-unseen_to_child_events.setdefault(unseen, set()).add(
-event.event_id
-)
-
-# Now check if this event is an unseen prev event, if so
-# then we remove this event from the appropriate dicts.
-for child_id in unseen_to_child_events.pop(event.event_id, []):
-event_to_unseen_prevs[child_id].discard(event.event_id)
-
-written_events.add(event.event_id)
-
-logger.info(
-"Written %d events in room %s", len(written_events), room_id
-)
-
-# Extremities are the events who have at least one unseen prev event.
-extremities = (
-event_id
-for event_id, unseen_prevs in event_to_unseen_prevs.items()
-if unseen_prevs
-)
-for event_id in extremities:
-if not event_to_unseen_prevs[event_id]:
-continue
-state = yield self.store.get_state_for_event(event_id)
-writer.write_state(room_id, event_id, state)
-
-defer.returnValue(writer.finished())
-
-
-class ExfiltrationWriter(object):
-"""Interface used to specify how to write exported data.
-"""
-
-def write_events(self, room_id, events):
-"""Write a batch of events for a room.
-
-Args:
-room_id (str)
-events (list[FrozenEvent])
-"""
-pass
-
-def write_state(self, room_id, event_id, state):
-"""Write the state at the given event in the room.
-
-This only gets called for backward extremities rather than for each
-event.
-
-Args:
-room_id (str)
-event_id (str)
-state (dict[tuple[str, str], FrozenEvent])
-"""
-pass
-
-def write_invite(self, room_id, event, state):
-"""Write an invite for the room, with associated invite state.
-
-Args:
-room_id (str)
-event (FrozenEvent)
-state (dict[tuple[str, str], dict]): A subset of the state at the
-invite, with a subset of the event keys (type, state_key
-content and sender)
-"""
-
-def finished(self):
-"""Called when all data has succesfully been exported and written.
-
-This functions return value is passed to the caller of
-`export_user_data`.
-"""
-pass
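For orientation, here is a minimal in-memory ExfiltrationWriter, sketched from the interface the hunk above deletes. The method names and the shapes of `events`/`state`/`invite` follow the deleted docstrings; the class name and storage layout are illustrative only.

class InMemoryExfiltrationWriter(object):
    """Collects exported data in plain dicts instead of writing to disk."""

    def __init__(self):
        self.events = {}   # room_id -> [event, ...]
        self.state = {}    # (room_id, event_id) -> {(type, state_key): event}
        self.invites = {}  # room_id -> (event, invited_state)

    def write_events(self, room_id, events):
        self.events.setdefault(room_id, []).extend(events)

    def write_state(self, room_id, event_id, state):
        # only called for backward extremities, per the deleted docstring
        self.state[(room_id, event_id)] = state

    def write_invite(self, room_id, event, state):
        self.invites[room_id] = (event, state)

    def finished(self):
        # this return value is handed back to the caller of export_user_data
        return {
            "events": self.events,
            "state": self.state,
            "invites": self.invites,
        }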
@@ -23,13 +23,13 @@ from twisted.internet import defer

 import synapse
 from synapse.api.constants import EventTypes
-from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import (
 event_processing_loop_counter,
 event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util import log_failure
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.metrics import Measure

 logger = logging.getLogger(__name__)

@@ -15,7 +15,6 @@
 # limitations under the License.

 import logging
-import time
 import unicodedata

 import attr

@@ -35,12 +34,11 @@ from synapse.api.errors import (
 LoginError,
 StoreError,
 SynapseError,
-UserDeactivatedError,
 )
 from synapse.api.ratelimiting import Ratelimiter
-from synapse.logging.context import defer_to_thread
 from synapse.module_api import ModuleApi
 from synapse.types import UserID
+from synapse.util import logcontext
 from synapse.util.caches.expiringcache import ExpiringCache

 from ._base import BaseHandler

@@ -560,7 +558,7 @@ class AuthHandler(BaseHandler):
 return self.sessions[session_id]

 @defer.inlineCallbacks
-def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms):
+def get_access_token_for_user_id(self, user_id, device_id=None):
 """
 Creates a new access token for the user with the given user ID.

@@ -574,27 +572,15 @@ class AuthHandler(BaseHandler):
 device_id (str|None): the device ID to associate with the tokens.
 None to leave the tokens unassociated with a device (deprecated:
 we should always have a device ID)
-valid_until_ms (int|None): when the token is valid until. None for
-no expiry.
 Returns:
 The access token for the user's session.
 Raises:
 StoreError if there was a problem storing the token.
 """
-fmt_expiry = ""
-if valid_until_ms is not None:
-fmt_expiry = time.strftime(
-" until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0)
-)
-logger.info("Logging in user %s on device %s%s", user_id, device_id, fmt_expiry)
+logger.info("Logging in user %s on device %s", user_id, device_id)
+access_token = yield self.issue_access_token(user_id, device_id)

 yield self.auth.check_auth_blocking(user_id)

-access_token = self.macaroon_gen.generate_access_token(user_id)
-yield self.store.add_access_token_to_user(
-user_id, access_token, device_id, valid_until_ms
-)

 # the device *should* have been registered before we got here; however,
 # it's possible we raced against a DELETE operation. The thing we
 # really don't want is active access_tokens without a record of the

@@ -624,7 +610,6 @@ class AuthHandler(BaseHandler):
 Raises:
 LimitExceededError if the ratelimiter's login requests count for this
 user is too high too proceed.
-UserDeactivatedError if a user is found but is deactivated.
 """
 self.ratelimit_login_per_account(user_id)
 res = yield self._find_user_id_and_pwd_hash(user_id)
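A standalone sketch of the expiry formatting that the get_access_token_for_user_id hunk above removes: valid_until_ms is an absolute expiry in epoch milliseconds, and the log-line suffix is only built when an expiry is set. The helper name is hypothetical; the strftime format string is taken from the deleted lines.

import time

def format_expiry(valid_until_ms):
    fmt_expiry = ""
    if valid_until_ms is not None:
        fmt_expiry = time.strftime(
            " until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0)
        )
    return fmt_expiry

# no expiry configured: suffix is empty
print("Logging in user %s on device %s%s" % ("@alice:example.com", "DEV1", format_expiry(None)))
# one-hour session: suffix carries the absolute expiry timestamp
print("Logging in user %s on device %s%s" % ("@alice:example.com", "DEV1", format_expiry(time.time() * 1000 + 3600000)))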
@@ -840,19 +825,18 @@ class AuthHandler(BaseHandler):
 if not lookupres:
 defer.returnValue(None)
 (user_id, password_hash) = lookupres

-# If the password hash is None, the account has likely been deactivated
-if not password_hash:
-deactivated = yield self.store.get_user_deactivated_status(user_id)
-if deactivated:
-raise UserDeactivatedError("This account has been deactivated")

 result = yield self.validate_hash(password, password_hash)
 if not result:
 logger.warn("Failed password login for user %s", user_id)
 defer.returnValue(None)
 defer.returnValue(user_id)

+@defer.inlineCallbacks
+def issue_access_token(self, user_id, device_id=None):
+access_token = self.macaroon_gen.generate_access_token(user_id)
+yield self.store.add_access_token_to_user(user_id, access_token, device_id)
+defer.returnValue(access_token)

 @defer.inlineCallbacks
 def validate_short_term_login_token_and_get_user_id(self, login_token):
 auth_api = self.hs.get_auth()

@@ -1003,7 +987,7 @@ class AuthHandler(BaseHandler):
 bcrypt.gensalt(self.bcrypt_rounds),
 ).decode("ascii")

-return defer_to_thread(self.hs.get_reactor(), _do_hash)
+return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)

 def validate_hash(self, password, stored_hash):
 """Validates that self.hash(password) == stored_hash.

@@ -1029,7 +1013,7 @@ class AuthHandler(BaseHandler):
 if not isinstance(stored_hash, bytes):
 stored_hash = stored_hash.encode("ascii")

-return defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
+return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
 else:
 return defer.succeed(False)

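Both defer_to_thread hunks touch the same pattern: bcrypt work is CPU-bound, so it is pushed onto a thread pool and the result comes back as a Deferred. A minimal sketch using plain twisted.internet.threads.deferToThread follows; Synapse routes the same idea through its own (logcontext.)defer_to_thread so log contexts survive the thread hop. Requires the `bcrypt` package; function names here are illustrative.

import bcrypt
from twisted.internet import defer, task, threads

def hash_password(password, rounds=12):
    def _do_hash():
        # CPU-heavy; runs on a worker thread, not the reactor thread
        return bcrypt.hashpw(
            password.encode("utf-8"), bcrypt.gensalt(rounds)
        ).decode("ascii")
    return threads.deferToThread(_do_hash)

def validate_hash(password, stored_hash):
    if not stored_hash:
        return defer.succeed(False)
    if not isinstance(stored_hash, bytes):
        stored_hash = stored_hash.encode("ascii")
    return threads.deferToThread(
        bcrypt.checkpw, password.encode("utf-8"), stored_hash
    )

@defer.inlineCallbacks
def main(reactor):
    stored = yield hash_password("s3cret", rounds=4)  # low rounds: fast demo
    ok = yield validate_hash("s3cret", stored)
    assert ok

task.react(main)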
@@ -22,9 +22,9 @@ from canonicaljson import encode_canonical_json, json

 from twisted.internet import defer

-from synapse.api.errors import CodeMessageException, SynapseError
-from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError
 from synapse.types import UserID, get_domain_from_id
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.retryutils import NotRetryingDestination

 logger = logging.getLogger(__name__)

@@ -350,6 +350,9 @@ def _exception_to_failure(e):
 if isinstance(e, NotRetryingDestination):
 return {"status": 503, "message": "Not ready for retry"}

+if isinstance(e, FederationDeniedError):
+return {"status": 403, "message": "Federation Denied"}
+
 # include ConnectionRefused and other errors
 #
 # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
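A self-contained sketch of the dispatch that the _exception_to_failure hunk extends: specific exception types map to fixed status/message dicts, with a catch-all for connection-level failures. The exception classes below are stand-ins for the Synapse ones.

class NotRetryingDestination(Exception):
    pass

class FederationDeniedError(Exception):
    pass

def exception_to_failure(e):
    if isinstance(e, NotRetryingDestination):
        return {"status": 503, "message": "Not ready for retry"}
    if isinstance(e, FederationDeniedError):
        return {"status": 403, "message": "Federation Denied"}
    # catch-all: ConnectionRefused and anything else
    return {"status": 503, "message": str(e)}

assert exception_to_failure(FederationDeniedError())["status"] == 403
assert exception_to_failure(ConnectionRefusedError("boom"))["status"] == 503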
@@ -21,8 +21,8 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
-from synapse.logging.utils import log_function
 from synapse.types import UserID
+from synapse.util.logutils import log_function
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler

@@ -45,13 +45,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.crypto.event_signing import compute_event_signature
 from synapse.event_auth import auth_types_for_event
 from synapse.events.validator import EventValidator
-from synapse.logging.context import (
-make_deferred_yieldable,
-nested_logging_context,
-preserve_fn,
-run_in_background,
-)
-from synapse.logging.utils import log_function
 from synapse.replication.http.federation import (
 ReplicationCleanRoomRestServlet,
 ReplicationFederationSendEventsRestServlet,

@@ -59,9 +52,10 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.types import UserID, get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
+from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server

@@ -344,7 +338,7 @@ class FederationHandler(BaseHandler):

 room_version = yield self.store.get_room_version(room_id)

-with nested_logging_context(p):
+with logcontext.nested_logging_context(p):
 # note that if any of the missing prevs share missing state or
 # auth events, the requests to fetch those events are deduped
 # by the get_pdu_cache in federation_client.

@@ -538,7 +532,7 @@ class FederationHandler(BaseHandler):
 event_id,
 ev.event_id,
 )
-with nested_logging_context(ev.event_id):
+with logcontext.nested_logging_context(ev.event_id):
 try:
 yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
 except FederationError as e:

@@ -731,10 +725,10 @@ class FederationHandler(BaseHandler):
 missing_auth - failed_to_fetch,
 )

-results = yield make_deferred_yieldable(
+results = yield logcontext.make_deferred_yieldable(
 defer.gatherResults(
 [
-run_in_background(
+logcontext.run_in_background(
 self.federation_client.get_pdu,
 [dest],
 event_id,

@@ -1000,8 +994,10 @@ class FederationHandler(BaseHandler):
 event_ids = list(extremities.keys())

 logger.debug("calling resolve_state_groups in _maybe_backfill")
-resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
-states = yield make_deferred_yieldable(
+resolve = logcontext.preserve_fn(
+self.state_handler.resolve_state_groups_for_events
+)
+states = yield logcontext.make_deferred_yieldable(
 defer.gatherResults(
 [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
 )
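The gatherResults hunks above all re-route the same fan-out idiom through synapse.util.logcontext. A minimal sketch of that idiom in plain Twisted follows; the logcontext variants add log-context bookkeeping around the same calls, and the resolve function here merely simulates I/O.

from twisted.internet import defer, task

@defer.inlineCallbacks
def resolve(reactor, event_id):
    yield task.deferLater(reactor, 0, lambda: None)  # pretend to do I/O
    defer.returnValue("state-for-%s" % event_id)

@defer.inlineCallbacks
def main(reactor):
    event_ids = ["$a", "$b", "$c"]
    # kick off one deferred per input, then wait for all of them;
    # consumeErrors=True stops one failure from leaving others unhandled
    states = yield defer.gatherResults(
        [resolve(reactor, e) for e in event_ids], consumeErrors=True
    )
    assert states == ["state-for-$a", "state-for-$b", "state-for-$c"]

task.react(main)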
@@ -1175,7 +1171,7 @@ class FederationHandler(BaseHandler):
 # lots of requests for missing prev_events which we do actually
 # have. Hence we fire off the deferred, but don't wait for it.

-run_in_background(self._handle_queued_pdus, room_queue)
+logcontext.run_in_background(self._handle_queued_pdus, room_queue)

 defer.returnValue(True)

@@ -1195,7 +1191,7 @@ class FederationHandler(BaseHandler):
 p.event_id,
 p.room_id,
 )
-with nested_logging_context(p.event_id):
+with logcontext.nested_logging_context(p.event_id):
 yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
 except Exception as e:
 logger.warn(

@@ -1204,28 +1200,11 @@ class FederationHandler(BaseHandler):

 @defer.inlineCallbacks
 @log_function
-def on_make_join_request(self, origin, room_id, user_id):
+def on_make_join_request(self, room_id, user_id):
 """ We've received a /make_join/ request, so we create a partial
 join event for the room and return that. We do *not* persist or
 process it until the other server has signed it and sent it back.
-
-Args:
-origin (str): The (verified) server name of the requesting server.
-room_id (str): Room to create join event in
-user_id (str): The user to create the join for
-
-Returns:
-Deferred[FrozenEvent]
 """

-if get_domain_from_id(user_id) != origin:
-logger.info(
-"Got /make_join request for user %r from different origin %s, ignoring",
-user_id,
-origin,
-)
-raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
-
 event_content = {"membership": Membership.JOIN}

 room_version = yield self.store.get_room_version(room_id)
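The guard deleted from on_make_join_request compares the domain of the joining user's MXID against the already-verified origin server. A standalone equivalent of get_domain_from_id plus the check, with a built-in PermissionError standing in for the 403 SynapseError:

def get_domain_from_id(user_id):
    # "@localpart:domain" -> "domain"
    return user_id.split(":", 1)[1]

def check_origin(user_id, origin):
    if get_domain_from_id(user_id) != origin:
        raise PermissionError("User not from origin")

check_origin("@alice:example.com", "example.com")  # passes
try:
    check_origin("@mallory:evil.example", "example.com")
except PermissionError:
    pass  # rejected, as the deleted code did with a 403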
@@ -1428,27 +1407,11 @@ class FederationHandler(BaseHandler):

 @defer.inlineCallbacks
 @log_function
-def on_make_leave_request(self, origin, room_id, user_id):
+def on_make_leave_request(self, room_id, user_id):
 """ We've received a /make_leave/ request, so we create a partial
 leave event for the room and return that. We do *not* persist or
 process it until the other server has signed it and sent it back.
-
-Args:
-origin (str): The (verified) server name of the requesting server.
-room_id (str): Room to create leave event in
-user_id (str): The user to create the leave for
-
-Returns:
-Deferred[FrozenEvent]
 """
-if get_domain_from_id(user_id) != origin:
-logger.info(
-"Got /make_leave request for user %r from different origin %s, ignoring",
-user_id,
-origin,
-)
-raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

 room_version = yield self.store.get_room_version(room_id)
 builder = self.event_builder_factory.new(
 room_version,

@@ -1647,7 +1610,7 @@ class FederationHandler(BaseHandler):
 success = True
 finally:
 if not success:
-run_in_background(
+logcontext.run_in_background(
 self.store.remove_push_actions_from_staging, event.event_id
 )

@@ -1666,7 +1629,7 @@ class FederationHandler(BaseHandler):
 @defer.inlineCallbacks
 def prep(ev_info):
 event = ev_info["event"]
-with nested_logging_context(suffix=event.event_id):
+with logcontext.nested_logging_context(suffix=event.event_id):
 res = yield self._prep_event(
 origin,
 event,

@@ -1676,9 +1639,12 @@ class FederationHandler(BaseHandler):
 )
 defer.returnValue(res)

-contexts = yield make_deferred_yieldable(
+contexts = yield logcontext.make_deferred_yieldable(
 defer.gatherResults(
-[run_in_background(prep, ev_info) for ev_info in event_infos],
+[
+logcontext.run_in_background(prep, ev_info)
+for ev_info in event_infos
+],
 consumeErrors=True,
 )
 )

@@ -2140,10 +2106,10 @@ class FederationHandler(BaseHandler):

 room_version = yield self.store.get_room_version(event.room_id)

-different_events = yield make_deferred_yieldable(
+different_events = yield logcontext.make_deferred_yieldable(
 defer.gatherResults(
 [
-run_in_background(
+logcontext.run_in_background(
 self.store.get_event, d, allow_none=True, allow_rejected=False
 )
 for d in different_auth

@@ -118,7 +118,7 @@ class IdentityHandler(BaseHandler):
 raise SynapseError(400, "No client_secret in creds")

 try:
-data = yield self.http_client.post_json_get_json(
+data = yield self.http_client.post_urlencoded_get_json(
 "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"),
 {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid},
 )

@@ -21,12 +21,12 @@ from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
-from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.streams.config import PaginationConfig
 from synapse.types import StreamToken, UserID
 from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.snapshot_cache import SnapshotCache
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler

@@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed

-from synapse import event_auth
 from synapse.api.constants import EventTypes, Membership, RelationTypes
 from synapse.api.errors import (
 AuthError,

@@ -35,13 +34,13 @@ from synapse.api.errors import (
 from synapse.api.room_versions import RoomVersions
 from synapse.api.urls import ConsentURIBuilder
 from synapse.events.validator import EventValidator
-from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
 from synapse.util.frozenutils import frozendict_json_encoder
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import measure_func
 from synapse.visibility import filter_events_for_client

@@ -785,20 +784,6 @@ class EventCreationHandler(object):
 event.signatures.update(returned_invite.signatures)

 if event.type == EventTypes.Redaction:
-original_event = yield self.store.get_event(
-event.redacts,
-check_redacted=False,
-get_prev_content=False,
-allow_rejected=False,
-allow_none=True,
-check_room_id=event.room_id,
-)
-
-# we can make some additional checks now if we have the original event.
-if original_event:
-if original_event.type == EventTypes.Create:
-raise AuthError(403, "Redacting create events is not permitted")
-
 prev_state_ids = yield context.get_prev_state_ids(self.store)
 auth_events_ids = yield self.auth.compute_auth_events(
 event, prev_state_ids, for_verification=True

@@ -806,18 +791,18 @@ class EventCreationHandler(object):
 auth_events = yield self.store.get_events(auth_events_ids)
 auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
 room_version = yield self.store.get_room_version(event.room_id)
-if event_auth.check_redaction(room_version, event, auth_events=auth_events):
-# this user doesn't have 'redact' rights, so we need to do some more
-# checks on the original event. Let's start by checking the original
-# event exists.
-if not original_event:
-raise NotFoundError("Could not find event %s" % (event.redacts,))
+if self.auth.check_redaction(room_version, event, auth_events=auth_events):
+original_event = yield self.store.get_event(
+event.redacts,
+check_redacted=False,
+get_prev_content=False,
+allow_rejected=False,
+allow_none=False,
+)
 if event.user_id != original_event.user_id:
 raise AuthError(403, "You don't have permission to redact events")

-# all the checks are done.
+# We've already checked.
 event.internal_metadata.recheck_redaction = False

 if event.type == EventTypes.Create:
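A minimal sketch of the permission logic both sides of the redaction hunk share: when check_redaction signals that the sender lacks blanket redact rights (it has already raised for senders with no right at all), the fallback is to require that the redaction sender authored the original event. The function and class names below are stand-ins; this reading of check_redaction's return value is an interpretation of the quoted comments.

class AuthError(Exception):
    pass

def enforce_redaction_rights(needs_sender_check, redaction_sender, original_sender):
    # needs_sender_check is True when the sender may only redact their own
    # events; False means the room's power levels already allow the redaction.
    if needs_sender_check and redaction_sender != original_sender:
        raise AuthError("You don't have permission to redact events")

enforce_redaction_rights(True, "@alice:hs", "@alice:hs")   # own event: allowed
try:
    enforce_redaction_rights(True, "@bob:hs", "@alice:hs")  # someone else's: rejected
except AuthError:
    pass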
@@ -20,10 +20,10 @@ from twisted.python.failure import Failure

 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
-from synapse.logging.context import run_in_background
 from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
+from synapse.util.logcontext import run_in_background
 from synapse.util.stringutils import random_string
 from synapse.visibility import filter_events_for_client

@@ -34,14 +34,14 @@ from twisted.internet import defer
 import synapse.metrics
 from synapse.api.constants import EventTypes, Membership, PresenceState
 from synapse.api.errors import SynapseError
-from synapse.logging.context import run_in_background
-from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.logcontext import run_in_background
+from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer

@@ -303,10 +303,6 @@ class BaseProfileHandler(BaseHandler):
 if not self.hs.config.require_auth_for_profile_requests or not requester:
 return

-# Always allow the user to query their own profile.
-if target_user.to_string() == requester.to_string():
-return
-
 try:
 requester_rooms = yield self.store.get_rooms_for_user(requester.to_string())
 target_user_rooms = yield self.store.get_rooms_for_user(

@@ -17,7 +17,7 @@ import logging
 from twisted.internet import defer

 from synapse.handlers._base import BaseHandler
-from synapse.types import ReadReceipt, get_domain_from_id
+from synapse.types import ReadReceipt

 logger = logging.getLogger(__name__)

@@ -40,27 +40,18 @@ class ReceiptsHandler(BaseHandler):
 def _received_remote_receipt(self, origin, content):
 """Called when we receive an EDU of type m.receipt from a remote HS.
 """
-receipts = []
-for room_id, room_values in content.items():
-for receipt_type, users in room_values.items():
-for user_id, user_values in users.items():
-if get_domain_from_id(user_id) != origin:
-logger.info(
-"Received receipt for user %r from server %s, ignoring",
-user_id,
-origin,
-)
-continue
-
-receipts.append(
-ReadReceipt(
-room_id=room_id,
-receipt_type=receipt_type,
-user_id=user_id,
-event_ids=user_values["event_ids"],
-data=user_values.get("data", {}),
-)
-)
+receipts = [
+ReadReceipt(
+room_id=room_id,
+receipt_type=receipt_type,
+user_id=user_id,
+event_ids=user_values["event_ids"],
+data=user_values.get("data", {}),
+)
+for room_id, room_values in content.items()
+for receipt_type, users in room_values.items()
+for user_id, user_values in users.items()
+]

 yield self._handle_new_receipts(receipts)

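A self-contained sketch of the rewrite in the receipts hunk: the nested loops appending ReadReceipt objects become one nested list comprehension over the m.receipt EDU content, which maps room_id -> receipt_type -> user_id -> {"event_ids": [...], "data": {...}}. Note the comprehension version also drops the per-user origin check the loop version performed. ReadReceipt is modelled here as a namedtuple for illustration, and the sample content is made up.

from collections import namedtuple

ReadReceipt = namedtuple(
    "ReadReceipt", ["room_id", "receipt_type", "user_id", "event_ids", "data"]
)

content = {
    "!room:example.com": {
        "m.read": {
            "@alice:example.com": {"event_ids": ["$ev1"], "data": {"ts": 1}},
            "@bob:example.com": {"event_ids": ["$ev2"]},
        }
    }
}

receipts = [
    ReadReceipt(
        room_id=room_id,
        receipt_type=receipt_type,
        user_id=user_id,
        event_ids=user_values["event_ids"],
        data=user_values.get("data", {}),
    )
    for room_id, room_values in content.items()
    for receipt_type, users in room_values.items()
    for user_id, user_values in users.items()
]

assert len(receipts) == 2 and receipts[0].room_id == "!room:example.com"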
@@ -84,8 +84,6 @@ class RegistrationHandler(BaseHandler):
 self.device_handler = hs.get_device_handler()
 self.pusher_pool = hs.get_pusherpool()

-self.session_lifetime = hs.config.session_lifetime
-
 @defer.inlineCallbacks
 def check_username(self, localpart, guest_access_token=None, assigned_user_id=None):
 if types.contains_invalid_mxid_characters(localpart):

@@ -140,10 +138,11 @@ class RegistrationHandler(BaseHandler):
 )

 @defer.inlineCallbacks
-def register_user(
+def register(
 self,
 localpart=None,
 password=None,
+generate_token=True,
 guest_access_token=None,
 make_guest=False,
 admin=False,

@@ -161,6 +160,11 @@ class RegistrationHandler(BaseHandler):
 password (unicode) : The password to assign to this user so they can
 login again. This can be None which means they cannot login again
 via a password (e.g. the user is an application service user).
+generate_token (bool): Whether a new access token should be
+generated. Having this be True should be considered deprecated,
+since it offers no means of associating a device_id with the
+access_token. Instead you should call auth_handler.issue_access_token
+after registration.
 user_type (str|None): type of user. One of the values from
 api.constants.UserTypes, or None for a normal user.
 default_display_name (unicode|None): if set, the new user's displayname

@@ -168,7 +172,7 @@ class RegistrationHandler(BaseHandler):
 address (str|None): the IP address used to perform the registration.
 bind_emails (List[str]): list of emails to bind to this account.
 Returns:
-Deferred[str]: user_id
+A tuple of (user_id, access_token).
 Raises:
 RegistrationError if there was a problem registering.
 """

@@ -202,8 +206,12 @@ class RegistrationHandler(BaseHandler):
 elif default_display_name is None:
 default_display_name = localpart

+token = None
+if generate_token:
+token = self.macaroon_gen.generate_access_token(user_id)
 yield self.register_with_store(
 user_id=user_id,
+token=token,
 password_hash=password_hash,
 was_guest=was_guest,
 make_guest=make_guest,

@@ -222,17 +230,21 @@ class RegistrationHandler(BaseHandler):
 else:
 # autogen a sequential user ID
 attempts = 0
+token = None
 user = None
 while not user:
 localpart = yield self._generate_user_id(attempts > 0)
 user = UserID(localpart, self.hs.hostname)
 user_id = user.to_string()
 yield self.check_user_id_not_appservice_exclusive(user_id)
+if generate_token:
+token = self.macaroon_gen.generate_access_token(user_id)
 if default_display_name is None:
 default_display_name = localpart
 try:
 yield self.register_with_store(
 user_id=user_id,
+token=token,
 password_hash=password_hash,
 make_guest=make_guest,
 create_profile_with_displayname=default_display_name,

@@ -242,15 +254,10 @@ class RegistrationHandler(BaseHandler):
 # if user id is taken, just generate another
 user = None
 user_id = None
+token = None
 attempts += 1

 if not self.hs.config.user_consent_at_registration:
 yield self._auto_join_rooms(user_id)
-else:
-logger.info(
-"Skipping auto-join for %s because consent is required at registration",
-user_id,
-)

 # Bind any specified emails to this account
 current_time = self.hs.get_clock().time_msec()

@@ -265,7 +272,7 @@ class RegistrationHandler(BaseHandler):
 # Bind email to new account
 yield self._register_email_threepid(user_id, threepid_dict, None, False)

-defer.returnValue(user_id)
+defer.returnValue((user_id, token))

 @defer.inlineCallbacks
 def _auto_join_rooms(self, user_id):

@@ -291,7 +298,6 @@ class RegistrationHandler(BaseHandler):
 count = yield self.store.count_all_users()
 should_auto_create_rooms = count == 1
 for r in self.hs.config.auto_join_rooms:
-logger.info("Auto-joining %s to %s", user_id, r)
 try:
 if should_auto_create_rooms:
 room_alias = RoomAlias.from_string(r)

@@ -499,6 +505,87 @@ class RegistrationHandler(BaseHandler):
 )
 defer.returnValue(data)

+@defer.inlineCallbacks
+def get_or_create_user(self, requester, localpart, displayname, password_hash=None):
+"""Creates a new user if the user does not exist,
+else revokes all previous access tokens and generates a new one.
+
+Args:
+localpart : The local part of the user ID to register. If None,
+one will be randomly generated.
+Returns:
+A tuple of (user_id, access_token).
+Raises:
+RegistrationError if there was a problem registering.
+
+NB this is only used in tests. TODO: move it to the test package!
+"""
+if localpart is None:
+raise SynapseError(400, "Request must include user id")
+yield self.auth.check_auth_blocking()
+need_register = True
+
+try:
+yield self.check_username(localpart)
+except SynapseError as e:
+if e.errcode == Codes.USER_IN_USE:
+need_register = False
+else:
+raise
+
+user = UserID(localpart, self.hs.hostname)
+user_id = user.to_string()
+token = self.macaroon_gen.generate_access_token(user_id)
+
+if need_register:
+yield self.register_with_store(
+user_id=user_id,
+token=token,
+password_hash=password_hash,
+create_profile_with_displayname=user.localpart,
+)
+else:
+yield self._auth_handler.delete_access_tokens_for_user(user_id)
+yield self.store.add_access_token_to_user(user_id=user_id, token=token)
+
+if displayname is not None:
+logger.info("setting user display name: %s -> %s", user_id, displayname)
+yield self.profile_handler.set_displayname(
+user, requester, displayname, by_admin=True
+)
+
+defer.returnValue((user_id, token))
+
+@defer.inlineCallbacks
+def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
+"""Get a guest access token for a 3PID, creating a guest account if
+one doesn't already exist.
+
+Args:
+medium (str)
+address (str)
+inviter_user_id (str): The user ID who is trying to invite the
+3PID
+
+Returns:
+Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
+3PID guest account.
+"""
+access_token = yield self.store.get_3pid_guest_access_token(medium, address)
+if access_token:
+user_info = yield self.auth.get_user_by_access_token(access_token)
+
+defer.returnValue((user_info["user"].to_string(), access_token))
+
+user_id, access_token = yield self.register(
+generate_token=True, make_guest=True
+)
+access_token = yield self.store.save_or_get_3pid_guest_access_token(
+medium, address, access_token, inviter_user_id
+)
+
+defer.returnValue((user_id, access_token))
+
 @defer.inlineCallbacks
 def _join_user_to_room(self, requester, room_identifier):
 room_id = None

@@ -528,6 +615,7 @@ class RegistrationHandler(BaseHandler):
 def register_with_store(
 self,
 user_id,
+token=None,
 password_hash=None,
 was_guest=False,
 make_guest=False,

@@ -541,6 +629,9 @@ class RegistrationHandler(BaseHandler):

 Args:
 user_id (str): The desired user ID to register.
+token (str): The desired access token to use for this user. If this
+is not None, the given access token is associated with the user
+id.
 password_hash (str|None): Optional. The password hash for this user.
 was_guest (bool): Optional. Whether this is a guest account being
 upgraded to a non-guest account.

@@ -576,6 +667,7 @@ class RegistrationHandler(BaseHandler):
 if self.hs.config.worker_app:
 return self._register_client(
 user_id=user_id,
+token=token,
 password_hash=password_hash,
 was_guest=was_guest,
 make_guest=make_guest,

@@ -586,8 +678,9 @@ class RegistrationHandler(BaseHandler):
 address=address,
 )
 else:
-return self.store.register_user(
+return self.store.register(
 user_id=user_id,
+token=token,
 password_hash=password_hash,
 was_guest=was_guest,
 make_guest=make_guest,

@@ -601,8 +694,6 @@ class RegistrationHandler(BaseHandler):
 def register_device(self, user_id, device_id, initial_display_name, is_guest=False):
 """Register a device for a user and generate an access token.

-The access token will be limited by the homeserver's session_lifetime config.
-
 Args:
 user_id (str): full canonical @user:id
 device_id (str|None): The device ID to check, or None to generate

@@ -623,29 +714,20 @@ class RegistrationHandler(BaseHandler):
 is_guest=is_guest,
 )
 defer.returnValue((r["device_id"], r["access_token"]))

-valid_until_ms = None
-if self.session_lifetime is not None:
-if is_guest:
-raise Exception(
-"session_lifetime is not currently implemented for guest access"
-)
-valid_until_ms = self.clock.time_msec() + self.session_lifetime
-
-device_id = yield self.device_handler.check_device_registered(
-user_id, device_id, initial_display_name
-)
-if is_guest:
-assert valid_until_ms is None
-access_token = self.macaroon_gen.generate_access_token(
-user_id, ["guest = true"]
-)
 else:
-access_token = yield self._auth_handler.get_access_token_for_user_id(
-user_id, device_id=device_id, valid_until_ms=valid_until_ms
+device_id = yield self.device_handler.check_device_registered(
+user_id, device_id, initial_display_name
 )
+if is_guest:
+access_token = self.macaroon_gen.generate_access_token(
+user_id, ["guest = true"]
+)
+else:
+access_token = yield self._auth_handler.get_access_token_for_user_id(
+user_id, device_id=device_id
+)

 defer.returnValue((device_id, access_token))

 @defer.inlineCallbacks
 def post_registration_actions(
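A sketch of the session-lifetime arithmetic the register_device hunk removes: when a session_lifetime (in milliseconds) is configured, tokens get an absolute expiry of "now + lifetime", and guest registration is rejected because expiring guest tokens are not implemented. The helper name is hypothetical; the behaviour mirrors the deleted lines.

import time

def compute_valid_until_ms(session_lifetime_ms, is_guest, now_ms=None):
    if session_lifetime_ms is None:
        return None  # no expiry configured
    if is_guest:
        raise Exception(
            "session_lifetime is not currently implemented for guest access"
        )
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    return now_ms + session_lifetime_ms

assert compute_valid_until_ms(None, False) is None
assert compute_valid_until_ms(86400000, False, now_ms=1000) == 86401000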
@@ -29,7 +29,7 @@ from twisted.internet import defer
 import synapse.server
 import synapse.types
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
+from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.types import RoomID, UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room, user_left_room

@@ -118,6 +118,24 @@ class RoomMemberHandler(object):
 """
 raise NotImplementedError()

+@abc.abstractmethod
+def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
+"""Get a guest access token for a 3PID, creating a guest account if
+one doesn't already exist.
+
+Args:
+requester (Requester)
+medium (str)
+address (str)
+inviter_user_id (str): The user ID who is trying to invite the
+3PID
+
+Returns:
+Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
+3PID guest account.
+"""
+raise NotImplementedError()
+
 @abc.abstractmethod
 def _user_joined_room(self, target, room_id):
 """Notifies distributor on master process that the user has joined the

@@ -872,23 +890,24 @@ class RoomMemberHandler(object):
 "sender_avatar_url": inviter_avatar_url,
 }

-try:
-data = yield self.simple_http_client.post_json_get_json(
-is_url, invite_config
-)
-except HttpResponseException as e:
-# Some identity servers may only support application/x-www-form-urlencoded
-# types. This is especially true with old instances of Sydent, see
-# https://github.com/matrix-org/sydent/pull/170
-logger.info(
-"Failed to POST %s with JSON, falling back to urlencoded form: %s",
-is_url,
-e,
-)
-data = yield self.simple_http_client.post_urlencoded_get_json(
-is_url, invite_config
+if self.config.invite_3pid_guest:
+guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
+requester=requester,
+medium=medium,
+address=address,
+inviter_user_id=inviter_user_id,
 )

+invite_config.update(
+{
+"guest_access_token": guest_access_token,
+"guest_user_id": guest_user_id,
+}
+)
+
+data = yield self.simple_http_client.post_urlencoded_get_json(
+is_url, invite_config
+)
 # TODO: Check for success
 token = data["token"]
 public_keys = data.get("public_keys", [])
@@ -991,6 +1010,12 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
|||||||
yield self.store.locally_reject_invite(target.to_string(), room_id)
|
yield self.store.locally_reject_invite(target.to_string(), room_id)
|
||||||
defer.returnValue({})
|
defer.returnValue({})
|
||||||
|
|
||||||
|
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
|
||||||
|
"""Implements RoomMemberHandler.get_or_register_3pid_guest
|
||||||
|
"""
|
||||||
|
rg = self.registration_handler
|
||||||
|
return rg.get_or_register_3pid_guest(medium, address, inviter_user_id)
|
||||||
|
|
||||||
def _user_joined_room(self, target, room_id):
|
def _user_joined_room(self, target, room_id):
|
||||||
"""Implements RoomMemberHandler._user_joined_room
|
"""Implements RoomMemberHandler._user_joined_room
|
||||||
"""
|
"""
|
||||||
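
The right-hand side reinstates the invite_3pid_guest path: when the option is enabled, a guest account is registered for the invited 3PID and its credentials are merged into the payload sent to the identity server. A self-contained sketch of that merge, with hypothetical literal values standing in for what get_or_register_3pid_guest would return:

    # The literal values below are illustrative; in Synapse they come from
    # get_or_register_3pid_guest() and the invite parameters.
    invite_config = {
        "medium": "email",
        "address": "invitee@example.com",
        "sender": "@inviter:example.com",
    }

    guest_user_id, guest_access_token = "@guest123:example.com", "MDAxGUESTTOKEN"

    invite_config.update(
        {
            "guest_access_token": guest_access_token,
            "guest_user_id": guest_user_id,
        }
    )

    print(invite_config)
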
@@ -20,6 +20,7 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError
 from synapse.handlers.room_member import RoomMemberHandler
 from synapse.replication.http.membership import (
+    ReplicationRegister3PIDGuestRestServlet as Repl3PID,
     ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
     ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
     ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
@@ -32,6 +33,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
     def __init__(self, hs):
         super(RoomMemberWorkerHandler, self).__init__(hs)
 
+        self._get_register_3pid_client = Repl3PID.make_client(hs)
         self._remote_join_client = ReplRemoteJoin.make_client(hs)
         self._remote_reject_client = ReplRejectInvite.make_client(hs)
         self._notify_change_client = ReplJoinedLeft.make_client(hs)
@@ -78,3 +80,13 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
         return self._notify_change_client(
             user_id=target.to_string(), room_id=room_id, change="left"
         )
+
+    def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
+        """Implements RoomMemberHandler.get_or_register_3pid_guest
+        """
+        return self._get_register_3pid_client(
+            requester=requester,
+            medium=medium,
+            address=address,
+            inviter_user_id=inviter_user_id,
+        )
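
On workers, get_or_register_3pid_guest cannot write to the database directly, so the call is proxied to the master process via a replication HTTP client built by make_client. A rough, self-contained sketch of that stub-building pattern (the class and helper here are illustrative, not Synapse's actual replication API):

    # make_client() builds a callable that forwards its kwargs to the master
    # over a well-known replication path. All names here are hypothetical.
    class ReplicationEndpoint(object):
        NAME = "register_3pid_guest"

        @classmethod
        def make_client(cls, http_post):
            def send_request(**kwargs):
                return http_post("/_synapse/replication/%s" % cls.NAME, kwargs)
            return send_request

    def fake_post(path, body):
        print("POST", path, body)
        return {"user_id": "@guest:example.com"}

    client = ReplicationEndpoint.make_client(fake_post)
    client(medium="email", address="a@b.c", inviter_user_id="@x:example.com")
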
@@ -25,7 +25,6 @@ from prometheus_client import Counter
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.logging.context import LoggingContext
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
@@ -34,6 +33,7 @@ from synapse.util.async_helpers import concurrently_execute
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.logcontext import LoggingContext
 from synapse.util.metrics import Measure, measure_func
 from synapse.visibility import filter_events_for_client
 
@@ -19,9 +19,9 @@ from collections import namedtuple
 from twisted.internet import defer
 
 from synapse.api.errors import AuthError, SynapseError
-from synapse.logging.context import run_in_background
 from synapse.types import UserID, get_domain_from_id
 from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
 
@@ -45,9 +45,9 @@ from synapse.http import (
     cancelled_to_request_timed_out_error,
     redact_uri,
 )
-from synapse.logging.context import make_deferred_yieldable
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.logcontext import make_deferred_yieldable
 
 logger = logging.getLogger(__name__)
 
@@ -30,9 +30,9 @@ from twisted.web.http_headers import Headers
 from twisted.web.iweb import IAgent
 
 from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
-from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
 from synapse.util.caches.ttlcache import TTLCache
+from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.metrics import Measure
 
 # period to cache .well-known results for by default
@@ -25,7 +25,7 @@ from twisted.internet.error import ConnectError
 from twisted.names import client, dns
 from twisted.names.error import DNSNameError, DomainError
 
-from synapse.logging.context import make_deferred_yieldable
+from synapse.util.logcontext import make_deferred_yieldable
 
 logger = logging.getLogger(__name__)
 
@@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
 from twisted.web.http_headers import Headers
 
-import synapse.logging.opentracing as opentracing
 import synapse.metrics
 import synapse.util.retryutils
 from synapse.api.errors import (
@@ -49,8 +48,8 @@ from synapse.api.errors import (
 from synapse.http import QuieterFileBodyProducer
 from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
-from synapse.logging.context import make_deferred_yieldable
 from synapse.util.async_helpers import timeout_deferred
+from synapse.util.logcontext import make_deferred_yieldable
 from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
@@ -340,25 +339,9 @@ class MatrixFederationHttpClient(object):
         else:
             query_bytes = b""
 
-        # Retreive current span
-        scope = opentracing.start_active_span(
-            "outgoing-federation-request",
-            tags={
-                opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
-                opentracing.tags.PEER_ADDRESS: request.destination,
-                opentracing.tags.HTTP_METHOD: request.method,
-                opentracing.tags.HTTP_URL: request.path,
-            },
-            finish_on_close=True,
-        )
-
-        # Inject the span into the headers
-        headers_dict = {}
-        opentracing.inject_active_span_byte_dict(headers_dict, request.destination)
-
-        headers_dict[b"User-Agent"] = [self.version_string_bytes]
+        headers_dict = {b"User-Agent": [self.version_string_bytes]}
 
-        with limiter, scope:
+        with limiter:
             # XXX: Would be much nicer to retry only at the transaction-layer
             # (once we have reliable transactions in place)
             if long_retries:
@@ -436,10 +419,6 @@ class MatrixFederationHttpClient(object):
                         response.phrase.decode("ascii", errors="replace"),
                     )
 
-                    opentracing.set_tag(
-                        opentracing.tags.HTTP_STATUS_CODE, response.code
-                    )
-
                     if 200 <= response.code < 300:
                         pass
                     else:
@@ -520,7 +499,8 @@ class MatrixFederationHttpClient(object):
                         _flatten_response_never_received(e),
                     )
                     raise
-            defer.returnValue(response)
+
+        defer.returnValue(response)
 
     def build_auth_headers(
         self, destination, method, url_bytes, content=None, destination_is=None
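
The removed block above opens a client-side OpenTracing span around each outgoing federation request, tags it, and injects its context into the HTTP headers. An illustrative sketch of the same pattern against the plain opentracing-python API (Synapse's own wrapper module differs; this assumes opentracing 2.x, and the URL values are hypothetical):

    import opentracing
    from opentracing.ext import tags
    from opentracing.propagation import Format

    tracer = opentracing.global_tracer()  # a no-op tracer unless one is installed

    headers = {}
    with tracer.start_active_span(
        "outgoing-federation-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.HTTP_METHOD: "GET",
            tags.HTTP_URL: "/_matrix/federation/v1/version",
        },
        finish_on_close=True,
    ) as scope:
        # propagate the span context to the remote side via HTTP headers;
        # with the default no-op tracer this leaves `headers` empty
        tracer.inject(scope.span.context, Format.HTTP_HEADERS, headers)
        scope.span.set_tag(tags.HTTP_STATUS_CODE, 200)

    print(headers)
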
@@ -19,8 +19,8 @@ import threading
 
 from prometheus_client.core import Counter, Histogram
 
-from synapse.logging.context import LoggingContext
 from synapse.metrics import LaterGauge
+from synapse.util.logcontext import LoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -39,8 +39,8 @@ from synapse.api.errors import (
     SynapseError,
     UnrecognizedRequestError,
 )
-from synapse.logging.context import preserve_fn
 from synapse.util.caches import intern_dict
+from synapse.util.logcontext import preserve_fn
 
 logger = logging.getLogger(__name__)
 
@@ -65,8 +65,8 @@ def wrap_json_request_handler(h):
     The handler method must have a signature of "handle_foo(self, request)",
     where "request" must be a SynapseRequest.
 
-    The handler must return a deferred or a coroutine. If the deferred succeeds
-    we assume that a response has been sent. If the deferred fails with a SynapseError we use
+    The handler must return a deferred. If the deferred succeeds we assume that
+    a response has been sent. If the deferred fails with a SynapseError we use
     it to send a JSON response with the appropriate HTTP reponse code. If the
     deferred fails with any other type of error we send a 500 reponse.
     """
@@ -245,9 +245,7 @@ class JsonResource(HttpServer, resource.Resource):
 
     isLeaf = True
 
-    _PathEntry = collections.namedtuple(
-        "_PathEntry", ["pattern", "callback", "servlet_classname"]
-    )
+    _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"])
 
     def __init__(self, hs, canonical_json=True):
         resource.Resource.__init__(self)
@@ -257,28 +255,12 @@ class JsonResource(HttpServer, resource.Resource):
         self.path_regexs = {}
         self.hs = hs
 
-    def register_paths(self, method, path_patterns, callback, servlet_classname):
-        """
-        Registers a request handler against a regular expression. Later request URLs are
-        checked against these regular expressions in order to identify an appropriate
-        handler for that request.
-
-        Args:
-            method (str): GET, POST etc
-
-            path_patterns (Iterable[str]): A list of regular expressions to which
-                the request URLs are compared.
-
-            callback (function): The handler for the request. Usually a Servlet
-
-            servlet_classname (str): The name of the handler to be used in prometheus
-                and opentracing logs.
-        """
+    def register_paths(self, method, path_patterns, callback):
         method = method.encode("utf-8")  # method is bytes on py3
         for path_pattern in path_patterns:
             logger.debug("Registering for %s %s", method, path_pattern.pattern)
             self.path_regexs.setdefault(method, []).append(
-                self._PathEntry(path_pattern, callback, servlet_classname)
+                self._PathEntry(path_pattern, callback)
             )
 
     def render(self, request):
@@ -293,9 +275,13 @@ class JsonResource(HttpServer, resource.Resource):
         This checks if anyone has registered a callback for that method and
         path.
         """
-        callback, servlet_classname, group_dict = self._get_handler_for_request(request)
+        callback, group_dict = self._get_handler_for_request(request)
 
+        # Make sure we have a name for this handler in prometheus.
+        servlet_instance = getattr(callback, "__self__", None)
+        if servlet_instance is not None:
+            servlet_classname = servlet_instance.__class__.__name__
+        else:
+            servlet_classname = "%r" % callback
         request.request_metrics.name = servlet_classname
 
         # Now trigger the callback. If it returns a response, we send it
@@ -325,8 +311,7 @@ class JsonResource(HttpServer, resource.Resource):
             request (twisted.web.http.Request):
 
         Returns:
-            Tuple[Callable, str, dict[unicode, unicode]]: callback method, the
-                label to use for that method in prometheus metrics, and the
+            Tuple[Callable, dict[unicode, unicode]]: callback method, and the
                 dict mapping keys to path components as specified in the
                 handler's path match regexp.
 
@@ -335,7 +320,7 @@ class JsonResource(HttpServer, resource.Resource):
             None, or a tuple of (http code, response body).
         """
         if request.method == b"OPTIONS":
-            return _options_handler, "options_request_handler", {}
+            return _options_handler, {}
 
         # Loop through all the registered callbacks to check if the method
         # and path regex match
@@ -343,10 +328,10 @@ class JsonResource(HttpServer, resource.Resource):
             m = path_entry.pattern.match(request.path.decode("ascii"))
             if m:
                 # We found a match!
-                return path_entry.callback, path_entry.servlet_classname, m.groupdict()
+                return path_entry.callback, m.groupdict()
 
         # Huh. No one wanted to handle that? Fiiiiiine. Send 400.
-        return _unrecognised_request_handler, "unrecognised_request_handler", {}
+        return _unrecognised_request_handler, {}
 
     def _send_response(
         self, request, code, response_json_object, response_code_message=None
@@ -368,22 +353,16 @@ class DirectServeResource(resource.Resource):
         """
         Render the request, using an asynchronous render handler if it exists.
         """
-        async_render_callback_name = "_async_render_" + request.method.decode("ascii")
+        render_callback_name = "_async_render_" + request.method.decode("ascii")
 
-        # Try and get the async renderer
-        callback = getattr(self, async_render_callback_name, None)
+        if hasattr(self, render_callback_name):
+            # Call the handler
+            callback = getattr(self, render_callback_name)
+            defer.ensureDeferred(callback(request))
 
-        # No async renderer for this request method.
-        if not callback:
-            return super().render(request)
-
-        resp = callback(request)
-
-        # If it's a coroutine, turn it into a Deferred
-        if isinstance(resp, types.CoroutineType):
-            defer.ensureDeferred(resp)
-
-        return NOT_DONE_YET
+            return NOT_DONE_YET
+        else:
+            super().render(request)
 
 
 def _options_handler(request):
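
With servlet_classname no longer threaded through register_paths, the right-hand render() recovers a metrics label from the callback itself: the class name of a bound method, or a repr() as a fallback. A standalone sketch of that introspection:

    # Use the class of the bound method if there is one, else fall back to repr().
    class SyncRestServlet(object):
        def on_GET(self, request):
            return 200, {}

    def handler_name(callback):
        servlet_instance = getattr(callback, "__self__", None)
        if servlet_instance is not None:
            return servlet_instance.__class__.__name__
        return "%r" % callback

    print(handler_name(SyncRestServlet().on_GET))   # -> SyncRestServlet
    print(handler_name(lambda request: (400, {})))  # -> <function ...>
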
@@ -20,7 +20,6 @@ import logging
 from canonicaljson import json
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.logging.opentracing import trace_servlet
 
 logger = logging.getLogger(__name__)
 
@@ -290,14 +289,8 @@ class RestServlet(object):
 
         for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
             if hasattr(self, "on_%s" % (method,)):
-                servlet_classname = self.__class__.__name__
                 method_handler = getattr(self, "on_%s" % (method,))
-                http_server.register_paths(
-                    method,
-                    patterns,
-                    trace_servlet(servlet_classname, method_handler),
-                    servlet_classname,
-                )
-
+                http_server.register_paths(method, patterns, method_handler)
             else:
                 raise NotImplementedError("RestServlet must register something.")
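
The registration loop above discovers on_GET/on_PUT/etc. handlers by reflection and hands each to the HTTP server. A toy, runnable version of the same discovery loop (the stub server and servlet here are hypothetical):

    class StubHttpServer(object):
        def register_paths(self, method, patterns, method_handler):
            print("registered", method, "->", method_handler.__name__)

    class VersionServlet(object):
        PATTERNS = ["/version"]

        def on_GET(self, request):
            return 200, {"version": "1.0"}

        def register(self, http_server):
            # mirror of the reflection loop in the hunk above
            for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
                if hasattr(self, "on_%s" % (method,)):
                    method_handler = getattr(self, "on_%s" % (method,))
                    http_server.register_paths(method, self.PATTERNS, method_handler)

    VersionServlet().register(StubHttpServer())
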
@@ -19,7 +19,7 @@ from twisted.web.server import Request, Site
 
 from synapse.http import redact_uri
 from synapse.http.request_metrics import RequestMetrics, requests_counter
-from synapse.logging.context import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 
 logger = logging.getLogger(__name__)
 
@@ -1,697 +0,0 @@
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-""" Thread-local-alike tracking of log contexts within synapse
-
-This module provides objects and utilities for tracking contexts through
-synapse code, so that log lines can include a request identifier, and so that
-CPU and database activity can be accounted for against the request that caused
-them.
-
-See doc/log_contexts.rst for details on how this works.
-"""
-
-import logging
-import threading
-import types
-
-from twisted.internet import defer, threads
-
-logger = logging.getLogger(__name__)
-
-try:
-    import resource
-
-    # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
-    # to be 1 on linux so we hard code it.
-    RUSAGE_THREAD = 1
-
-    # If the system doesn't support RUSAGE_THREAD then this should throw an
-    # exception.
-    resource.getrusage(RUSAGE_THREAD)
-
-    def get_thread_resource_usage():
-        return resource.getrusage(RUSAGE_THREAD)
-
-
-except Exception:
-    # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
-    # won't track resource usage by returning None.
-    def get_thread_resource_usage():
-        return None
-
-
-# get an id for the current thread.
-#
-# threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
-# on Linux it actually returns the same value either side of a fork() call. However
-# we only fork in one place, so it's not worth the hoop-jumping to get a real tid.
-#
-get_thread_id = threading.get_ident
-
-
-class ContextResourceUsage(object):
-    """Object for tracking the resources used by a log context
-
-    Attributes:
-        ru_utime (float): user CPU time (in seconds)
-        ru_stime (float): system CPU time (in seconds)
-        db_txn_count (int): number of database transactions done
-        db_sched_duration_sec (float): amount of time spent waiting for a
-            database connection
-        db_txn_duration_sec (float): amount of time spent doing database
-            transactions (excluding scheduling time)
-        evt_db_fetch_count (int): number of events requested from the database
-    """
-
-    __slots__ = [
-        "ru_stime",
-        "ru_utime",
-        "db_txn_count",
-        "db_txn_duration_sec",
-        "db_sched_duration_sec",
-        "evt_db_fetch_count",
-    ]
-
-    def __init__(self, copy_from=None):
-        """Create a new ContextResourceUsage
-
-        Args:
-            copy_from (ContextResourceUsage|None): if not None, an object to
-                copy stats from
-        """
-        if copy_from is None:
-            self.reset()
-        else:
-            self.ru_utime = copy_from.ru_utime
-            self.ru_stime = copy_from.ru_stime
-            self.db_txn_count = copy_from.db_txn_count
-
-            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
-            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
-            self.evt_db_fetch_count = copy_from.evt_db_fetch_count
-
-    def copy(self):
-        return ContextResourceUsage(copy_from=self)
-
-    def reset(self):
-        self.ru_stime = 0.0
-        self.ru_utime = 0.0
-        self.db_txn_count = 0
-
-        self.db_txn_duration_sec = 0
-        self.db_sched_duration_sec = 0
-        self.evt_db_fetch_count = 0
-
-    def __repr__(self):
-        return (
-            "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
-            "db_txn_count='%r', db_txn_duration_sec='%r', "
-            "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
-        ) % (
-            self.ru_stime,
-            self.ru_utime,
-            self.db_txn_count,
-            self.db_txn_duration_sec,
-            self.db_sched_duration_sec,
-            self.evt_db_fetch_count,
-        )
-
-    def __iadd__(self, other):
-        """Add another ContextResourceUsage's stats to this one's.
-
-        Args:
-            other (ContextResourceUsage): the other resource usage object
-        """
-        self.ru_utime += other.ru_utime
-        self.ru_stime += other.ru_stime
-        self.db_txn_count += other.db_txn_count
-        self.db_txn_duration_sec += other.db_txn_duration_sec
-        self.db_sched_duration_sec += other.db_sched_duration_sec
-        self.evt_db_fetch_count += other.evt_db_fetch_count
-        return self
-
-    def __isub__(self, other):
-        self.ru_utime -= other.ru_utime
-        self.ru_stime -= other.ru_stime
-        self.db_txn_count -= other.db_txn_count
-        self.db_txn_duration_sec -= other.db_txn_duration_sec
-        self.db_sched_duration_sec -= other.db_sched_duration_sec
-        self.evt_db_fetch_count -= other.evt_db_fetch_count
-        return self
-
-    def __add__(self, other):
-        res = ContextResourceUsage(copy_from=self)
-        res += other
-        return res
-
-    def __sub__(self, other):
-        res = ContextResourceUsage(copy_from=self)
-        res -= other
-        return res
-
-
-class LoggingContext(object):
-    """Additional context for log formatting. Contexts are scoped within a
-    "with" block.
-
-    If a parent is given when creating a new context, then:
-        - logging fields are copied from the parent to the new context on entry
-        - when the new context exits, the cpu usage stats are copied from the
-          child to the parent
-
-    Args:
-        name (str): Name for the context for debugging.
-        parent_context (LoggingContext|None): The parent of the new context
-    """
-
-    __slots__ = [
-        "previous_context",
-        "name",
-        "parent_context",
-        "_resource_usage",
-        "usage_start",
-        "main_thread",
-        "alive",
-        "request",
-        "tag",
-        "scope",
-    ]
-
-    thread_local = threading.local()
-
-    class Sentinel(object):
-        """Sentinel to represent the root context"""
-
-        __slots__ = []
-
-        def __str__(self):
-            return "sentinel"
-
-        def copy_to(self, record):
-            pass
-
-        def start(self):
-            pass
-
-        def stop(self):
-            pass
-
-        def add_database_transaction(self, duration_sec):
-            pass
-
-        def add_database_scheduled(self, sched_sec):
-            pass
-
-        def record_event_fetch(self, event_count):
-            pass
-
-        def __nonzero__(self):
-            return False
-
-        __bool__ = __nonzero__  # python3
-
-    sentinel = Sentinel()
-
-    def __init__(self, name=None, parent_context=None, request=None):
-        self.previous_context = LoggingContext.current_context()
-        self.name = name
-
-        # track the resources used by this context so far
-        self._resource_usage = ContextResourceUsage()
-
-        # If alive has the thread resource usage when the logcontext last
-        # became active.
-        self.usage_start = None
-
-        self.main_thread = get_thread_id()
-        self.request = None
-        self.tag = ""
-        self.alive = True
-        self.scope = None
-
-        self.parent_context = parent_context
-
-        if self.parent_context is not None:
-            self.parent_context.copy_to(self)
-
-        if request is not None:
-            # the request param overrides the request from the parent context
-            self.request = request
-
-    def __str__(self):
-        if self.request:
-            return str(self.request)
-        return "%s@%x" % (self.name, id(self))
-
-    @classmethod
-    def current_context(cls):
-        """Get the current logging context from thread local storage
-
-        Returns:
-            LoggingContext: the current logging context
-        """
-        return getattr(cls.thread_local, "current_context", cls.sentinel)
-
-    @classmethod
-    def set_current_context(cls, context):
-        """Set the current logging context in thread local storage
-        Args:
-            context(LoggingContext): The context to activate.
-        Returns:
-            The context that was previously active
-        """
-        current = cls.current_context()
-
-        if current is not context:
-            current.stop()
-            cls.thread_local.current_context = context
-            context.start()
-        return current
-
-    def __enter__(self):
-        """Enters this logging context into thread local storage"""
-        old_context = self.set_current_context(self)
-        if self.previous_context != old_context:
-            logger.warn(
-                "Expected previous context %r, found %r",
-                self.previous_context,
-                old_context,
-            )
-        self.alive = True
-
-        return self
-
-    def __exit__(self, type, value, traceback):
-        """Restore the logging context in thread local storage to the state it
-        was before this context was entered.
-        Returns:
-            None to avoid suppressing any exceptions that were thrown.
-        """
-        current = self.set_current_context(self.previous_context)
-        if current is not self:
-            if current is self.sentinel:
-                logger.warning("Expected logging context %s was lost", self)
-            else:
-                logger.warning(
-                    "Expected logging context %s but found %s", self, current
-                )
-        self.previous_context = None
-        self.alive = False
-
-        # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None and hasattr(
-            self.parent_context, "_resource_usage"
-        ):
-            self.parent_context._resource_usage += self._resource_usage
-
-            # reset them in case we get entered again
-            self._resource_usage.reset()
-
-    def copy_to(self, record):
-        """Copy logging fields from this context to a log record or
-        another LoggingContext
-        """
-
-        # we track the current request
-        record.request = self.request
-
-        # we also track the current scope:
-        record.scope = self.scope
-
-    def start(self):
-        if get_thread_id() != self.main_thread:
-            logger.warning("Started logcontext %s on different thread", self)
-            return
-
-        # If we haven't already started record the thread resource usage so
-        # far
-        if not self.usage_start:
-            self.usage_start = get_thread_resource_usage()
-
-    def stop(self):
-        if get_thread_id() != self.main_thread:
-            logger.warning("Stopped logcontext %s on different thread", self)
-            return
-
-        # When we stop, let's record the cpu used since we started
-        if not self.usage_start:
-            logger.warning("Called stop on logcontext %s without calling start", self)
-            return
-
-        utime_delta, stime_delta = self._get_cputime()
-        self._resource_usage.ru_utime += utime_delta
-        self._resource_usage.ru_stime += stime_delta
-
-        self.usage_start = None
-
-    def get_resource_usage(self):
-        """Get resources used by this logcontext so far.
-
-        Returns:
-            ContextResourceUsage: a *copy* of the object tracking resource
-                usage so far
-        """
-        # we always return a copy, for consistency
-        res = self._resource_usage.copy()
-
-        # If we are on the correct thread and we're currently running then we
-        # can include resource usage so far.
-        is_main_thread = get_thread_id() == self.main_thread
-        if self.alive and self.usage_start and is_main_thread:
-            utime_delta, stime_delta = self._get_cputime()
-            res.ru_utime += utime_delta
-            res.ru_stime += stime_delta
-
-        return res
-
-    def _get_cputime(self):
-        """Get the cpu usage time so far
-
-        Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
-        """
-        current = get_thread_resource_usage()
-
-        utime_delta = current.ru_utime - self.usage_start.ru_utime
-        stime_delta = current.ru_stime - self.usage_start.ru_stime
-
-        # sanity check
-        if utime_delta < 0:
-            logger.error(
-                "utime went backwards! %f < %f",
-                current.ru_utime,
-                self.usage_start.ru_utime,
-            )
-            utime_delta = 0
-
-        if stime_delta < 0:
-            logger.error(
-                "stime went backwards! %f < %f",
-                current.ru_stime,
-                self.usage_start.ru_stime,
-            )
-            stime_delta = 0
-
-        return utime_delta, stime_delta
-
-    def add_database_transaction(self, duration_sec):
-        if duration_sec < 0:
-            raise ValueError("DB txn time can only be non-negative")
-        self._resource_usage.db_txn_count += 1
-        self._resource_usage.db_txn_duration_sec += duration_sec
-
-    def add_database_scheduled(self, sched_sec):
-        """Record a use of the database pool
-
-        Args:
-            sched_sec (float): number of seconds it took us to get a
-                connection
-        """
-        if sched_sec < 0:
-            raise ValueError("DB scheduling time can only be non-negative")
-        self._resource_usage.db_sched_duration_sec += sched_sec
-
-    def record_event_fetch(self, event_count):
-        """Record a number of events being fetched from the db
-
-        Args:
-            event_count (int): number of events being fetched
-        """
-        self._resource_usage.evt_db_fetch_count += event_count
-
-
-class LoggingContextFilter(logging.Filter):
-    """Logging filter that adds values from the current logging context to each
-    record.
-    Args:
-        **defaults: Default values to avoid formatters complaining about
-            missing fields
-    """
-
-    def __init__(self, **defaults):
-        self.defaults = defaults
-
-    def filter(self, record):
-        """Add each fields from the logging contexts to the record.
-        Returns:
-            True to include the record in the log output.
-        """
-        context = LoggingContext.current_context()
-        for key, value in self.defaults.items():
-            setattr(record, key, value)
-
-        # context should never be None, but if it somehow ends up being, then
-        # we end up in a death spiral of infinite loops, so let's check, for
-        # robustness' sake.
-        if context is not None:
-            context.copy_to(record)
-
-        return True
-
-
-class PreserveLoggingContext(object):
-    """Captures the current logging context and restores it when the scope is
-    exited. Used to restore the context after a function using
-    @defer.inlineCallbacks is resumed by a callback from the reactor."""
-
-    __slots__ = ["current_context", "new_context", "has_parent"]
-
-    def __init__(self, new_context=None):
-        if new_context is None:
-            new_context = LoggingContext.sentinel
-        self.new_context = new_context
-
-    def __enter__(self):
-        """Captures the current logging context"""
-        self.current_context = LoggingContext.set_current_context(self.new_context)
-
-        if self.current_context:
-            self.has_parent = self.current_context.previous_context is not None
-            if not self.current_context.alive:
-                logger.debug("Entering dead context: %s", self.current_context)
-
-    def __exit__(self, type, value, traceback):
-        """Restores the current logging context"""
-        context = LoggingContext.set_current_context(self.current_context)
-
-        if context != self.new_context:
-            if context is LoggingContext.sentinel:
-                logger.warning("Expected logging context %s was lost", self.new_context)
-            else:
-                logger.warning(
-                    "Expected logging context %s but found %s",
-                    self.new_context,
-                    context,
-                )
-
-        if self.current_context is not LoggingContext.sentinel:
-            if not self.current_context.alive:
-                logger.debug("Restoring dead context: %s", self.current_context)
-
-
-def nested_logging_context(suffix, parent_context=None):
-    """Creates a new logging context as a child of another.
-
-    The nested logging context will have a 'request' made up of the parent context's
-    request, plus the given suffix.
-
-    CPU/db usage stats will be added to the parent context's on exit.
-
-    Normal usage looks like:
-
-        with nested_logging_context(suffix):
-            # ... do stuff
-
-    Args:
-        suffix (str): suffix to add to the parent context's 'request'.
-        parent_context (LoggingContext|None): parent context. Will use the current context
-            if None.
-
-    Returns:
-        LoggingContext: new logging context.
-    """
-    if parent_context is None:
-        parent_context = LoggingContext.current_context()
-    return LoggingContext(
-        parent_context=parent_context, request=parent_context.request + "-" + suffix
-    )
-
-
-def preserve_fn(f):
-    """Function decorator which wraps the function with run_in_background"""
-
-    def g(*args, **kwargs):
-        return run_in_background(f, *args, **kwargs)
-
-    return g
-
-
-def run_in_background(f, *args, **kwargs):
-    """Calls a function, ensuring that the current context is restored after
-    return from the function, and that the sentinel context is set once the
-    deferred returned by the function completes.
-
-    Useful for wrapping functions that return a deferred or coroutine, which you don't
-    yield or await on (for instance because you want to pass it to
-    deferred.gatherResults()).
-
-    Note that if you completely discard the result, you should make sure that
-    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
-    CRITICAL error about an unhandled error will be logged without much
-    indication about where it came from.
-    """
-    current = LoggingContext.current_context()
-    try:
-        res = f(*args, **kwargs)
-    except:  # noqa: E722
-        # the assumption here is that the caller doesn't want to be disturbed
-        # by synchronous exceptions, so let's turn them into Failures.
-        return defer.fail()
-
-    if isinstance(res, types.CoroutineType):
-        res = defer.ensureDeferred(res)
-
-    if not isinstance(res, defer.Deferred):
-        return res
-
-    if res.called and not res.paused:
-        # The function should have maintained the logcontext, so we can
-        # optimise out the messing about
-        return res
-
-    # The function may have reset the context before returning, so
-    # we need to restore it now.
-    ctx = LoggingContext.set_current_context(current)
-
-    # The original context will be restored when the deferred
-    # completes, but there is nothing waiting for it, so it will
-    # get leaked into the reactor or some other function which
-    # wasn't expecting it. We therefore need to reset the context
-    # here.
-    #
-    # (If this feels asymmetric, consider it this way: we are
-    # effectively forking a new thread of execution. We are
-    # probably currently within a ``with LoggingContext()`` block,
-    # which is supposed to have a single entry and exit point. But
-    # by spawning off another deferred, we are effectively
-    # adding a new exit point.)
-    res.addBoth(_set_context_cb, ctx)
-    return res
-
-
-def make_deferred_yieldable(deferred):
-    """Given a deferred, make it follow the Synapse logcontext rules:
-
-    If the deferred has completed (or is not actually a Deferred), essentially
-    does nothing (just returns another completed deferred with the
-    result/failure).
-
-    If the deferred has not yet completed, resets the logcontext before
-    returning a deferred. Then, when the deferred completes, restores the
-    current logcontext before running callbacks/errbacks.
-
-    (This is more-or-less the opposite operation to run_in_background.)
-    """
-    if not isinstance(deferred, defer.Deferred):
-        return deferred
-
-    if deferred.called and not deferred.paused:
-        # it looks like this deferred is ready to run any callbacks we give it
-        # immediately. We may as well optimise out the logcontext faffery.
-        return deferred
-
-    # ok, we can't be sure that a yield won't block, so let's reset the
-    # logcontext, and add a callback to the deferred to restore it.
-    prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
-    deferred.addBoth(_set_context_cb, prev_context)
-    return deferred
-
-
-def _set_context_cb(result, context):
-    """A callback function which just sets the logging context"""
-    LoggingContext.set_current_context(context)
-    return result
-
-
-def defer_to_thread(reactor, f, *args, **kwargs):
-    """
-    Calls the function `f` using a thread from the reactor's default threadpool and
-    returns the result as a Deferred.
-
-    Creates a new logcontext for `f`, which is created as a child of the current
-    logcontext (so its CPU usage metrics will get attributed to the current
-    logcontext). `f` should preserve the logcontext it is given.
-
-    The result deferred follows the Synapse logcontext rules: you should `yield`
-    on it.
-
-    Args:
-        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
-            the Deferred will be invoked, and whose threadpool we should use for the
-            function.
-
-            Normally this will be hs.get_reactor().
-
-        f (callable): The function to call.
-
-        args: positional arguments to pass to f.
-
-        kwargs: keyword arguments to pass to f.
-
-    Returns:
-        Deferred: A Deferred which fires a callback with the result of `f`, or an
-            errback if `f` throws an exception.
-    """
-    return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
-
-
-def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
-    """
-    A wrapper for twisted.internet.threads.deferToThreadpool, which handles
-    logcontexts correctly.
-
-    Calls the function `f` using a thread from the given threadpool and returns
-    the result as a Deferred.
-
-    Creates a new logcontext for `f`, which is created as a child of the current
-    logcontext (so its CPU usage metrics will get attributed to the current
-    logcontext). `f` should preserve the logcontext it is given.
-
-    The result deferred follows the Synapse logcontext rules: you should `yield`
-    on it.
-
-    Args:
-        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
-            the Deferred will be invoked. Normally this will be hs.get_reactor().
-
-        threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
-            running `f`. Normally this will be hs.get_reactor().getThreadPool().
-
-        f (callable): The function to call.
-
-        args: positional arguments to pass to f.
-
-        kwargs: keyword arguments to pass to f.
-
-    Returns:
-        Deferred: A Deferred which fires a callback with the result of `f`, or an
-            errback if `f` throws an exception.
-    """
-    logcontext = LoggingContext.current_context()
-
-    def g():
-        with LoggingContext(parent_context=logcontext):
-            return f(*args, **kwargs)
-
-    return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
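
The module deleted above implements the logcontext rules its docstrings describe: a "current context" lives in thread-local storage, is swapped in on entry and restored on exit, and helpers like run_in_background and make_deferred_yieldable keep deferreds honest about it. A compressed, self-contained illustration of just the thread-local swap at the heart of LoggingContext (a sketch, not the real API):

    import threading

    _store = threading.local()

    class MiniLogContext(object):
        def __init__(self, name):
            self.name = name

        def __enter__(self):
            # remember whatever was active, then make ourselves current
            self.previous = getattr(_store, "current", None)
            _store.current = self
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # restore the previous context, mirroring __exit__ above
            _store.current = self.previous

    with MiniLogContext("GET-42"):
        # any log line emitted here could be stamped with the request id
        print("current context:", _store.current.name)
    print("restored:", getattr(_store, "current", None))
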
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-import traceback
-
-from six import StringIO
-
-
-class LogFormatter(logging.Formatter):
-    """Log formatter which gives more detail for exceptions
-
-    This is the same as the standard log formatter, except that when logging
-    exceptions [typically via log.foo("msg", exc_info=1)], it prints the
-    sequence that led up to the point at which the exception was caught.
-    (Normally only stack frames between the point the exception was raised and
-    where it was caught are logged).
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(LogFormatter, self).__init__(*args, **kwargs)
-
-    def formatException(self, ei):
-        sio = StringIO()
-        (typ, val, tb) = ei
-
-        # log the stack above the exception capture point if possible, but
-        # check that we actually have an f_back attribute to work around
-        # https://twistedmatrix.com/trac/ticket/9305
-
-        if tb and hasattr(tb.tb_frame, "f_back"):
-            sio.write("Capture point (most recent call last):\n")
-            traceback.print_stack(tb.tb_frame.f_back, None, sio)
-
-        traceback.print_exception(typ, val, tb, None, sio)
-        s = sio.getvalue()
-        sio.close()
-        if s[-1:] == "\n":
-            s = s[:-1]
-        return s
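
The deleted LogFormatter plugs into the standard logging machinery purely via formatException(). A minimal sketch of wiring any such formatter subclass onto a handler, so that logging with exc_info exercises it:

    import logging
    import sys

    handler = logging.StreamHandler(sys.stdout)
    # any logging.Formatter subclass overriding formatException() slots in here
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))

    logger = logging.getLogger("demo")
    logger.addHandler(handler)

    try:
        1 / 0
    except ZeroDivisionError:
        # exc_info=1 is what triggers formatException() on the installed formatter
        logger.error("something went wrong", exc_info=1)
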
@@ -1,483 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE
# This is a small wrapper around opentracing because opentracing is not currently
# packaged downstream (specifically debian). Since opentracing instrumentation is
# fairly invasive it was awkward to make it optional. As a result we opted to encapsulate
# all opentracing state in these methods, which effectively noop if opentracing is
# not present. We should strongly consider encouraging the downstream distributors
# to package opentracing and making opentracing a full dependency. In order to facilitate
# this move, the methods here work very similarly to opentracing's, and it should only
# be a matter of a few regexes to move over to opentracing's access patterns proper.

"""
============================
Using OpenTracing in Synapse
============================

Python-specific tracing concepts are at https://opentracing.io/guides/python/.
Note that Synapse wraps OpenTracing in a small module (this one) in order to make the
OpenTracing dependency optional. That means that the access patterns are
different to those demonstrated in the OpenTracing guides. However, it is
still useful to know, especially if OpenTracing is included as a full dependency
in the future or if you are modifying this module.


OpenTracing is encapsulated so that
no span objects from OpenTracing are exposed in Synapse's code. This allows
OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as
an optional dependency. This does however limit the number of modifiable spans
at any point in the code to one. From here on, references to `opentracing`
in the code snippets refer to Synapse's wrapper module.

Tracing
-------

In Synapse it is not possible to start a non-active span. Spans can be started
using the ``start_active_span`` method. This returns a scope (see
OpenTracing docs) which is a context manager that needs to be entered and
exited. This is usually done by using ``with``.
.. code-block:: python

    from synapse.logging.opentracing import start_active_span

    with start_active_span("operation name"):
        # Do something we want to trace

Forgetting to enter or exit a scope will result in some mysterious and grievous log
context errors.

At any time when there is an active span, ``opentracing.set_tag`` can be used to
set a tag on the current active span.
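For example (a minimal sketch; ``set_tag`` is one of this module's wrappers,
defined below, and the tag name is illustrative):

.. code-block:: python

    from synapse.logging.opentracing import start_active_span, set_tag

    with start_active_span("fetch remote keys"):
        set_tag("server", "example.com")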

Tracing functions
-----------------

Functions can be easily traced using decorators. There is a decorator for
'normal' functions and for functions which are actually deferreds. The name of
the function becomes the operation name for the span.

.. code-block:: python

    from synapse.logging.opentracing import trace, trace_deferred

    # Start a span using 'normal_function' as the operation name
    @trace
    def normal_function(*args, **kwargs):
        # Does all kinds of cool and expected things
        return something_usual_and_useful

    # Start a span using 'deferred_function' as the operation name
    @trace_deferred
    @defer.inlineCallbacks
    def deferred_function(*args, **kwargs):
        # We start
        yield we_wait
        # we finish
        defer.returnValue(something_usual_and_useful)

Operation names can be explicitly set for functions by using
``trace_using_operation_name`` and
``trace_deferred_using_operation_name``

.. code-block:: python

    from synapse.logging.opentracing import (
        trace_using_operation_name,
        trace_deferred_using_operation_name
    )

    @trace_using_operation_name("A *much* better operation name")
    def normal_function(*args, **kwargs):
        # Does all kinds of cool and expected things
        return something_usual_and_useful

    @trace_deferred_using_operation_name("Another exciting operation name!")
    @defer.inlineCallbacks
    def deferred_function(*args, **kwargs):
        # We start
        yield we_wait
        # we finish
        defer.returnValue(something_usual_and_useful)

Contexts and carriers
---------------------

There is a selection of wrappers for injecting and extracting contexts from
carriers provided. Unfortunately OpenTracing's three context injection
techniques are not adequate for our injection of OpenTracing span-contexts into
Twisted's http headers, EDU contents and our database tables. Also note that
the binary encoding format mandated by OpenTracing is not actually implemented
by jaeger_client v4.0.0 - it will silently noop.
Please refer to the end of ``logging/opentracing.py`` for the available
injection and extraction methods.
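As a rough sketch of the carrier helpers defined at the end of this module (the
destination and operation names here are illustrative):

.. code-block:: python

    from synapse.logging.opentracing import (
        inject_active_span_byte_dict,
        start_active_span_from_context,
    )

    # Sender: copy the active span's context into outgoing byte-encoded headers.
    headers = {}
    inject_active_span_byte_dict(headers, "remote.example.com")

    # Receiver: continue the trace from an incoming request's Twisted headers.
    with start_active_span_from_context(request.requestHeaders, "incoming-request"):
        handle(request)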

Homeserver whitelisting
-----------------------

Most of the whitelist checks are encapsulated in the module's injection
and extraction methods, but be aware that using custom carriers or crossing
uncharted waters will require the enforcement of the whitelist.
``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes
in a destination and compares it to the whitelist.
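For example (a minimal sketch; the server name is illustrative):

.. code-block:: python

    from synapse.logging.opentracing import whitelisted_homeserver

    if whitelisted_homeserver("hs1.example.com"):
        # Only now is it acceptable to propagate the span context onwards.
        ...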

=======
Gotchas
=======

- Checking whitelists on span propagation
- Inserting PII
- Forgetting to enter or exit a scope
- Span source: make sure that the span you expect to be active across a
  function call really will be that one. Does the current function have more
  than one caller? Will all of those calling functions be in a context
  with an active span?
"""

import contextlib
import logging
import re
from functools import wraps

from twisted.internet import defer

from synapse.config import ConfigError

try:
    import opentracing
except ImportError:
    opentracing = None
try:
    from jaeger_client import Config as JaegerConfig

    from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
    JaegerConfig = None
    LogContextScopeManager = None


logger = logging.getLogger(__name__)


class _DumTagNames(object):
    """wrapper of opentracing's tags. We need to have them if we
    want to reference them without opentracing around. Clearly they
    should never actually show up in a trace. `set_tags` overwrites
    these with the correct ones."""

    INVALID_TAG = "invalid-tag"
    COMPONENT = INVALID_TAG
    DATABASE_INSTANCE = INVALID_TAG
    DATABASE_STATEMENT = INVALID_TAG
    DATABASE_TYPE = INVALID_TAG
    DATABASE_USER = INVALID_TAG
    ERROR = INVALID_TAG
    HTTP_METHOD = INVALID_TAG
    HTTP_STATUS_CODE = INVALID_TAG
    HTTP_URL = INVALID_TAG
    MESSAGE_BUS_DESTINATION = INVALID_TAG
    PEER_ADDRESS = INVALID_TAG
    PEER_HOSTNAME = INVALID_TAG
    PEER_HOST_IPV4 = INVALID_TAG
    PEER_HOST_IPV6 = INVALID_TAG
    PEER_PORT = INVALID_TAG
    PEER_SERVICE = INVALID_TAG
    SAMPLING_PRIORITY = INVALID_TAG
    SERVICE = INVALID_TAG
    SPAN_KIND = INVALID_TAG
    SPAN_KIND_CONSUMER = INVALID_TAG
    SPAN_KIND_PRODUCER = INVALID_TAG
    SPAN_KIND_RPC_CLIENT = INVALID_TAG
    SPAN_KIND_RPC_SERVER = INVALID_TAG


def only_if_tracing(func):
    """Executes the function only if we're tracing. Otherwise returns None.
    Assumes the function wrapped may return None"""

    @wraps(func)
    def _only_if_tracing_inner(*args, **kwargs):
        if opentracing:
            return func(*args, **kwargs)
        else:
            return

    return _only_if_tracing_inner
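# Illustrative use of @only_if_tracing (a sketch, not code from this module):
#
#     @only_if_tracing
#     def note_event():
#         opentracing.tracer.active_span.log_kv({"event": "noted"})
#
#     note_event()  # silently does nothing when opentracing is unavailable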

# A regex which matches the server_names to expose traces for.
# None means 'block everything'.
_homeserver_whitelist = None

tags = _DumTagNames


def init_tracer(config):
    """Set the whitelists and initialise the JaegerClient tracer

    Args:
        config (HomeserverConfig): The config used by the homeserver
    """
    global opentracing
    if not config.opentracer_enabled:
        # We don't have a tracer
        opentracing = None
        return

    if not opentracing or not JaegerConfig:
        raise ConfigError(
            "The server has been configured to use opentracing but opentracing is not "
            "installed."
        )

    # Include the worker name
    name = config.worker_name if config.worker_name else "master"

    set_homeserver_whitelist(config.opentracer_whitelist)
    jaeger_config = JaegerConfig(
        config={"sampler": {"type": "const", "param": 1}, "logging": True},
        service_name="{} {}".format(config.server_name, name),
        scope_manager=LogContextScopeManager(config),
    )
    jaeger_config.initialize_tracer()

    # Set up tags to be opentracing's tags
    global tags
    tags = opentracing.tags


@contextlib.contextmanager
def _noop_context_manager(*args, **kwargs):
    """Does absolutely nothing really well. Can be entered and exited arbitrarily.
    Good substitute for an opentracing scope."""
    yield


# Could use kwargs but I want these to be explicit
def start_active_span(
    operation_name,
    child_of=None,
    references=None,
    tags=None,
    start_time=None,
    ignore_active_span=False,
    finish_on_close=True,
):
    """Starts an active opentracing span. Note, the scope doesn't become active
    until it has been entered; however, the span starts from the time this
    method is called.

    Args:
        See opentracing.tracer
    Returns:
        scope (Scope) or noop_context_manager
    """
    if opentracing is None:
        return _noop_context_manager()
    else:
        # We need to enter the scope here for the logcontext to become active
        return opentracing.tracer.start_active_span(
            operation_name,
            child_of=child_of,
            references=references,
            tags=tags,
            start_time=start_time,
            ignore_active_span=ignore_active_span,
            finish_on_close=finish_on_close,
        )


@only_if_tracing
def close_active_span():
    """Closes the active span. This will close its logcontext if the context
    was made for the span"""
    opentracing.tracer.scope_manager.active.__exit__(None, None, None)


@only_if_tracing
def set_tag(key, value):
    """Sets a tag on the active span"""
    opentracing.tracer.active_span.set_tag(key, value)


@only_if_tracing
def log_kv(key_values, timestamp=None):
    """Log to the active span"""
    opentracing.tracer.active_span.log_kv(key_values, timestamp)


# Note: we don't have a get baggage items because we're trying to hide all
# scope and span state from synapse. I think this method may also be useless
# as a result
@only_if_tracing
def set_baggage_item(key, value):
    """Attach baggage to the active span"""
    opentracing.tracer.active_span.set_baggage_item(key, value)


@only_if_tracing
def set_operation_name(operation_name):
    """Sets the operation name of the active span"""
    opentracing.tracer.active_span.set_operation_name(operation_name)


@only_if_tracing
def set_homeserver_whitelist(homeserver_whitelist):
    """Sets the whitelist

    Args:
        homeserver_whitelist (iterable of strings): regexes of whitelisted homeservers
    """
    global _homeserver_whitelist
    if homeserver_whitelist:
        # Makes a single regex which accepts all passed in regexes in the list
        _homeserver_whitelist = re.compile(
            "({})".format(")|(".join(homeserver_whitelist))
        )
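# For example, set_homeserver_whitelist(["synapse\\.org", ".*matrix\\.org"])
# compiles the single pattern "(synapse\.org)|(.*matrix\.org)", so a destination
# is whitelisted if it matches any one of the configured regexes.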


@only_if_tracing
def whitelisted_homeserver(destination):
    """Checks if a destination matches the whitelist

    Args:
        destination (str)
    """
    if _homeserver_whitelist:
        return _homeserver_whitelist.match(destination)
    return False


def start_active_span_from_context(
    headers,
    operation_name,
    references=None,
    tags=None,
    start_time=None,
    ignore_active_span=False,
    finish_on_close=True,
):
    """
    Extracts a span context from Twisted Headers and starts an active span
    which continues from it.

    Args:
        headers (twisted.web.http_headers.Headers)
    Returns:
        a Scope for the new active span, or a noop context manager if
        opentracing is unavailable
    """
    # Twisted encodes the values as lists whereas opentracing doesn't.
    # So, we take the first item in the list.
    # Also, twisted uses byte arrays while opentracing expects strings.
    if opentracing is None:
        return _noop_context_manager()

    header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()}
    context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict)

    return opentracing.tracer.start_active_span(
        operation_name,
        child_of=context,
        references=references,
        tags=tags,
        start_time=start_time,
        ignore_active_span=ignore_active_span,
        finish_on_close=finish_on_close,
    )
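# Illustrative use (a sketch; `request` stands in for a twisted.web.server.Request):
#
#     with start_active_span_from_context(request.requestHeaders, "handle-request"):
#         process(request)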


@only_if_tracing
def inject_active_span_twisted_headers(headers, destination):
    """
    Injects a span context into twisted headers in-place

    Args:
        headers (twisted.web.http_headers.Headers)
        destination (str): the homeserver the span context is being sent to

    Returns:
        In-place modification of headers

    Note:
        The headers set by the tracer are custom to the tracer implementation which
        should be unique enough that they don't interfere with any headers set by
        synapse or twisted. If we're still using jaeger these headers would be those
        here:
        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
    """

    if not whitelisted_homeserver(destination):
        return

    span = opentracing.tracer.active_span
    carrier = {}
    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)

    for key, value in carrier.items():
        headers.addRawHeaders(key, value)


@only_if_tracing
def inject_active_span_byte_dict(headers, destination):
    """
    Injects a span context into a dict where the headers are encoded as byte
    strings

    Args:
        headers (dict)
        destination (str): the homeserver the span context is being sent to

    Returns:
        In-place modification of headers

    Note:
        The headers set by the tracer are custom to the tracer implementation which
        should be unique enough that they don't interfere with any headers set by
        synapse or twisted. If we're still using jaeger these headers would be those
        here:
        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
    """
    if not whitelisted_homeserver(destination):
        return

    span = opentracing.tracer.active_span

    carrier = {}
    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)

    for key, value in carrier.items():
        headers[key.encode()] = [value.encode()]


def trace_servlet(servlet_name, func):
    """Decorator which traces a servlet. It starts a span with some servlet-specific
    tags such as the servlet_name and request information"""

    @wraps(func)
    @defer.inlineCallbacks
    def _trace_servlet_inner(request, *args, **kwargs):
        with start_active_span_from_context(
            request.requestHeaders,
            "incoming-client-request",
            tags={
                "request_id": request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "servlet_name": servlet_name,
            },
        ):
            result = yield defer.maybeDeferred(func, request, *args, **kwargs)
            defer.returnValue(result)

    return _trace_servlet_inner
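A rough usage sketch for ``trace_servlet`` (the servlet name and handler are
hypothetical): wrapping a request handler makes every call run inside an
"incoming-client-request" span tagged with the request's method, URI and client IP.

    @defer.inlineCallbacks
    def on_GET(request):
        # ... handle the request ...
        defer.returnValue((200, {}))

    on_GET = trace_servlet("PingRestServlet", on_GET)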
@@ -1,138 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from opentracing import Scope, ScopeManager

import twisted

from synapse.logging.context import LoggingContext, nested_logging_context

logger = logging.getLogger(__name__)


class LogContextScopeManager(ScopeManager):
    """
    The LogContextScopeManager tracks the active scope in opentracing
    by using the log contexts which are native to synapse. This is so
    that the basic opentracing api can be used across twisted deferreds.
    (I would love to break logcontexts and this into an OS package, but
    let's wait for twisted's contexts to be released.)
    """

    def __init__(self, config):
        pass

    @property
    def active(self):
        """
        Returns the currently active Scope which can be used to access the
        currently active Scope.span.
        If there is a non-null Scope, its wrapped Span
        becomes an implicit parent of any newly-created Span at
        Tracer.start_active_span() time.

        Return:
            (Scope): the Scope that is active, or None if not
            available.
        """
        ctx = LoggingContext.current_context()
        if ctx is LoggingContext.sentinel:
            return None
        else:
            return ctx.scope

    def activate(self, span, finish_on_close):
        """
        Makes a Span active.

        Args:
            span (Span): the span that should become active.
            finish_on_close (Boolean): whether the Span should be automatically
                finished when Scope.close() is called.

        Returns:
            Scope to control the end of the active period for
            *span*. It is a programming error to neglect to call
            Scope.close() on the returned instance.
        """

        enter_logcontext = False
        ctx = LoggingContext.current_context()

        if ctx is LoggingContext.sentinel:
            # We don't want this scope to affect anything outside a logcontext.
            logger.error("Tried to activate scope outside of loggingcontext")
            return Scope(None, span)
        elif ctx.scope is not None:
            # We want the logging scope to look exactly the same so we give it
            # a blank suffix
            ctx = nested_logging_context("")
            enter_logcontext = True

        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
        ctx.scope = scope
        return scope


class _LogContextScope(Scope):
    """
    A custom opentracing scope. The only significant difference is that it will
    close the log context it's related to if the logcontext was created specifically
    for this scope.
    """

    def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close):
        """
        Args:
            manager (LogContextScopeManager):
                the manager that is responsible for this scope.
            span (Span):
                the opentracing span which this scope represents the local
                lifetime for.
            logcontext (LogContext):
                the logcontext to which this scope is attached.
            enter_logcontext (Boolean):
                if True the logcontext will be entered and exited when the scope
                is entered and exited respectively
            finish_on_close (Boolean):
                if True finish the span when the scope is closed
        """
        super(_LogContextScope, self).__init__(manager, span)
        self.logcontext = logcontext
        self._finish_on_close = finish_on_close
        self._enter_logcontext = enter_logcontext

    def __enter__(self):
        if self._enter_logcontext:
            self.logcontext.__enter__()

    def __exit__(self, type, value, traceback):
        if type == twisted.internet.defer._DefGen_Return:
            super(_LogContextScope, self).__exit__(None, None, None)
        else:
            super(_LogContextScope, self).__exit__(type, value, traceback)
        if self._enter_logcontext:
            self.logcontext.__exit__(type, value, traceback)
        else:  # the logcontext existed before the creation of the scope
            self.logcontext.scope = None

    def close(self):
        if self.manager.active is not self:
            logger.error("Tried to close a non-active scope!")
            return

        if self._finish_on_close:
            self.span.finish()
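A hedged sketch of the intended interplay (LoggingContext usage simplified; `span`
stands in for an opentracing.Span obtained from the tracer): a scope activated
inside a logcontext is stored on that context, so `manager.active` finds it again
on the same logical thread of execution.

    manager = LogContextScopeManager(config)

    with LoggingContext("request"):
        scope = manager.activate(span, finish_on_close=True)
        assert manager.active is scope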
@@ -29,16 +29,8 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily

from twisted.internet import reactor

from synapse.metrics._exposition import (
    MetricsResource,
    generate_latest,
    start_http_server,
)

logger = logging.getLogger(__name__)

METRICS_PREFIX = "/_synapse/metrics"

running_on_pypy = platform.python_implementation() == "PyPy"
all_metrics = []
all_collectors = []
@@ -478,12 +470,3 @@ try:
    gc.disable()
except AttributeError:
    pass

__all__ = [
    "MetricsResource",
    "generate_latest",
    "start_http_server",
    "LaterGauge",
    "InFlightGauge",
    "BucketCollector",
]
@@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This code is based off `prometheus_client/exposition.py` from version 0.7.1.

Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""

import math
import threading
from collections import namedtuple
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import parse_qs, urlparse

from prometheus_client import REGISTRY

from twisted.web.resource import Resource

try:
    from prometheus_client.samples import Sample
except ImportError:
    Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"])


CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")


INF = float("inf")
MINUS_INF = float("-inf")


def floatToGoString(d):
    d = float(d)
    if d == INF:
        return "+Inf"
    elif d == MINUS_INF:
        return "-Inf"
    elif math.isnan(d):
        return "NaN"
    else:
        s = repr(d)
        dot = s.find(".")
        # Go switches to exponents sooner than Python.
        # We only need to care about positive values for le/quantile.
        if d > 0 and dot > 6:
            mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.")
            return "{0}e+0{1}".format(mantissa, dot - 1)
        return s
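# A few illustrative outputs (Go-style formatting):
#   floatToGoString(float("inf"))  -> "+Inf"
#   floatToGoString(float("nan"))  -> "NaN"
#   floatToGoString(1.5)           -> "1.5"
#   floatToGoString(10000000.0)    -> "1e+07"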


def sample_line(line, name):
    if line.labels:
        labelstr = "{{{0}}}".format(
            ",".join(
                [
                    '{0}="{1}"'.format(
                        k,
                        v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
                    )
                    for k, v in sorted(line.labels.items())
                ]
            )
        )
    else:
        labelstr = ""
    timestamp = ""
    if line.timestamp is not None:
        # Convert to milliseconds.
        timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
    return "{0}{1} {2}{3}\n".format(
        name, labelstr, floatToGoString(line.value), timestamp
    )


def nameify_sample(sample):
    """
    If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a
    namedtuple which has the names we expect.
    """
    if not isinstance(sample, Sample):
        sample = Sample(*sample, None, None)

    return sample
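# Illustration: a pre-0.4.0 three-tuple sample gains the expected field names,
# with timestamp and exemplar padded out to None:
#   nameify_sample(("metric_name", {"label": "x"}, 1.0))
#   -> Sample("metric_name", {"label": "x"}, 1.0, None, None)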


def generate_latest(registry, emit_help=False):
    output = []

    for metric in registry.collect():

        if metric.name.startswith("__unused"):
            continue

        if not metric.samples:
            # No samples, don't bother.
            continue

        mname = metric.name
        mnewname = metric.name
        mtype = metric.type

        # OpenMetrics -> Prometheus
        if mtype == "counter":
            mnewname = mnewname + "_total"
        elif mtype == "info":
            mtype = "gauge"
            mnewname = mnewname + "_info"
        elif mtype == "stateset":
            mtype = "gauge"
        elif mtype == "gaugehistogram":
            mtype = "histogram"
        elif mtype == "unknown":
            mtype = "untyped"

        # Output in the old format for compatibility.
        if emit_help:
            output.append(
                "# HELP {0} {1}\n".format(
                    mname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append("# TYPE {0} {1}\n".format(mname, mtype))
        for sample in map(nameify_sample, metric.samples):
            # Get rid of the OpenMetrics specific samples
            for suffix in ["_created", "_gsum", "_gcount"]:
                if sample.name.endswith(suffix):
                    break
            else:
                newname = sample.name.replace(mnewname, mname)
                if ":" in newname and newname.endswith("_total"):
                    newname = newname[: -len("_total")]
                output.append(sample_line(sample, newname))

        # Get rid of the weird colon things while we're at it
        if mtype == "counter":
            mnewname = mnewname.replace(":total", "")
        mnewname = mnewname.replace(":", "_")

        if mname == mnewname:
            continue

        # Also output in the new format, if it's different.
        if emit_help:
            output.append(
                "# HELP {0} {1}\n".format(
                    mnewname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
        for sample in map(nameify_sample, metric.samples):
            # Get rid of the OpenMetrics specific samples
            for suffix in ["_created", "_gsum", "_gcount"]:
                if sample.name.endswith(suffix):
                    break
            else:
                output.append(
                    sample_line(
                        sample, sample.name.replace(":total", "").replace(":", "_")
                    )
                )

    return "".join(output).encode("utf-8")
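# Rough sketch of the dual emission: a counter registered as "foo" is written
# out once under the legacy name "foo" and again as "foo_total" (the
# post-0.4.0 naming convention), so both old and new dashboards keep working.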


class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``REGISTRY``."""

    registry = REGISTRY

    def do_GET(self):
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)

        if "help" in params:
            emit_help = True
        else:
            emit_help = False

        try:
            output = generate_latest(registry, emit_help=emit_help)
        except Exception:
            self.send_error(500, "error generating metric output")
            raise
        self.send_response(200)
        self.send_header("Content-Type", CONTENT_TYPE_LATEST)
        self.end_headers()
        self.wfile.write(output)

    def log_message(self, format, *args):
        """Log nothing."""

    @classmethod
    def factory(cls, registry):
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to REGISTRY).

        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str(cls.__name__)
        MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
        return MyMetricsHandler


class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """Thread per request HTTP server."""

    # Make worker threads "fire and forget". Beginning with Python 3.7 this
    # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
    # non-daemon threads in a list in order to join on them at server close.
    # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
    # same as Python 3.7's ``ThreadingHTTPServer``.
    daemon_threads = True


def start_http_server(port, addr="", registry=REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    CustomMetricsHandler = MetricsHandler.factory(registry)
    httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
    t = threading.Thread(target=httpd.serve_forever)
    t.daemon = True
    t.start()
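# Typical use (port number illustrative): expose the default registry over
# plain HTTP in a background daemon thread.
#
#     start_http_server(9090)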


class MetricsResource(Resource):
    """
    Twisted ``Resource`` that serves prometheus metrics.
    """

    isLeaf = True

    def __init__(self, registry=REGISTRY):
        self.registry = registry

    def render_GET(self, request):
        request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
        return generate_latest(self.registry)
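For reference, a minimal sketch of serving this resource from Twisted (the port
and path are illustrative):

    from twisted.internet import reactor
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    root = Resource()
    root.putChild(b"metrics", MetricsResource())
    reactor.listenTCP(9090, Site(root))
    reactor.run()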
Some files were not shown because too many files have changed in this diff.