Compare commits


1 Commit

Author SHA1 Message Date
Erik Johnston
85f28744e8 Fix /keys/changes TypeError
We also use the new cache of users who share rooms.
2017-02-02 13:07:52 +00:00
1696 changed files with 63175 additions and 318525 deletions


@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}


@@ -1,19 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the postgresql database that will be available in the
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
name: "psycopg2"
args:
user: postgres
host: localhost
password: postgres
database: synapse
# Suppress the key server warning.
trusted_key_servers: []


@@ -1,31 +0,0 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
cur.execute(c)


@@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
exit 1
fi
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."
exit 1
fi


@@ -1,81 +0,0 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
# Prevent tzdata from asking for user input
export DEBIAN_FRONTEND=noninteractive
set -ex
apt-get update
apt-get install -y \
python3 python3-dev python3-pip python3-venv pipx \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
export LANG="C.UTF-8"
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Pin pyOpenSSL to 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
# `python = "^3.7"` to `python = "==3.7"`, which would mean we fail because olddeps
# runs on 3.8 (#12343).
sed -i \
-e "s/[~>]=/==/g" \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install --user toml
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
~/.local/bin/poetry install -E "all test"
~/.local/bin/poetry run trial --jobs=2 tests


@@ -1,53 +0,0 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
# Now do the same again, on an empty database.
echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml


@@ -1,16 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
name: "sqlite3"
args:
database: ".ci/test_db.db"
# Suppress the key server warning.
trusted_key_servers: []

Binary file not shown.


@@ -1,4 +0,0 @@
---
title: CI run against Twisted trunk is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}


@@ -1,2 +0,0 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.


@@ -1,14 +0,0 @@
comment: off
coverage:
status:
project:
default:
target: 0 # Target % coverage, can be auto. Turned off for now
threshold: null
base: auto
patch:
default:
target: 0
threshold: null
base: auto


@@ -1,8 +0,0 @@
[run]
branch = True
parallel = True
include=$TOP/synapse/*
data_file = $TOP/.coverage
[report]
precision = 2


@@ -1,11 +0,0 @@
# ignore everything by default
*
# things to include
!docker
!synapse
!README.rst
!pyproject.toml
!poetry.lock
**/__pycache__


@@ -1,9 +0,0 @@
# EditorConfig https://EditorConfig.org
# top-most EditorConfig file
root = true
# 4 space indentation
[*.py]
indent_style = space
indent_size = 4

.flake8

@@ -1,11 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501


@@ -1,8 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1

.github/CODEOWNERS

@@ -1,2 +0,0 @@
# Automatically request reviews from the synapse-core team when a pull request comes in.
* @matrix-org/synapse-core

.github/FUNDING.yml

@@ -1,4 +0,0 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg


@@ -1,5 +0,0 @@
**If you are looking for support** please ask in **#synapse:matrix.org**
(using a matrix.org account if necessary). We do not use GitHub issues for
support.
**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/


@@ -1,72 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
**THIS IS NOT A SUPPORT CHANNEL!**
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
<!-- Describe here the problem that you are experiencing -->
### Steps to reproduce
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:
If not matrix.org:
<!--
What version of Synapse is running?
You can find the Synapse version with this command:
$ curl http://localhost:8008/_synapse/admin/v1/server_version
(You may need to replace `localhost:8008` if Synapse is not configured to
listen on that port.)
-->
- **Version**:
- **Install method**:
<!-- examples: package manager/git clone/pip -->
- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->


@@ -1,9 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
---
**Description:**
<!-- Describe here the feature you are requesting. -->


@@ -1,10 +0,0 @@
---
name: Support request
about: I need support for Synapse
---
Please don't file github issues asking for support.
Instead, please join [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org)
(from a matrix.org account if necessary), and ask there.


@@ -1,14 +0,0 @@
### Pull Request Checklist
<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
- Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))

.github/SUPPORT.md

@@ -1,3 +0,0 @@
[**#synapse:matrix.org**](https://matrix.to/#/#synapse:matrix.org) is the official support room for
Synapse, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html.
Please ask for support there, rather than filing github issues.


@@ -1,57 +0,0 @@
# GitHub actions workflow which builds and publishes the docker images.
name: Build docker images
on:
push:
tags: ["v*"]
branches: [ master, main, develop ]
workflow_dispatch:
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v1
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v2
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64


@@ -1,65 +0,0 @@
name: Deploy the documentation
on:
push:
branches:
# For bleeding-edge documentation
- develop
# For documentation specific to a release
- 'release-v*'
# stable docs
- master
workflow_dispatch:
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Figure out the target directory.
#
# The target directory depends on the name of the branch
#
- name: Get the target directory name
id: vars
run: |
# first strip the 'refs/heads/' prefix with some shell foo
branch="${GITHUB_REF#refs/heads/}"
case $branch in
release-*)
# strip 'release-' from the name for release branches.
branch="${branch#release-}"
;;
master)
# deploy to "latest" for the master branch.
branch="latest"
;;
esac
# finally, set the 'branch-version' var.
echo "::set-output name=branch-version::$branch"
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ steps.vars.outputs.branch-version }}


@@ -1,159 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
name: Latest dependencies
on:
schedule:
- cron: 0 7 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
# Upgrade all runtime dependencies only. This is intended to mimic a fresh
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- database: "sqlite"
- database: "postgres"
postgres-version: "14"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install .[all,test]
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
volumes:
- ${{ github.workspace }}:/src
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
workers: workers
redis: redis
env:
POSTGRES: ${{ matrix.postgres && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v2
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
working-directory: /src
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# TODO: run complement (as with twisted trunk, see #12473).
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy
- trial
- sytest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md


@@ -1,121 +0,0 @@
# GitHub actions workflow which builds the release artifacts.
name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
get-distros:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
# now build the packages with a matrix build.
build-debs:
needs: get-distros
name: "Build .deb packages"
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@v2
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
# for the cache magic here
run: |
./src/scripts-dev/build_debian_packages.py \
--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
--docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
--docker-build-arg=--progress=plain \
--docker-build-arg=--load \
"${{ matrix.distro }}"
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v2
with:
name: debs
path: debs/*
build-sdist:
name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
name: "Attach assets to release"
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v2
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}


@@ -1,385 +0,0 @@
name: Tests
on:
push:
branches: ["develop", "release-*"]
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig]
runs-on: ubuntu-latest
steps:
- run: "true"
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
# Oldest Python with PostgreSQL
- python-version: "3.7"
database: "postgres"
postgres-version: "10"
extras: "all"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:focal # For old python and sqlite
# Note: focal seems to be using 3.8, but the oldest is 3.7?
# See https://github.com/matrix-org/synapse/issues/12343
with:
workdir: /github/workspace
entrypoint: .ci/scripts/test_old_deps.sh
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
- uses: actions/checkout@v2
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: focal
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: postgres
workers: workers
redis: redis
steps:
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
export-data:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: [linting-done, portdb]
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
services:
postgres:
image: postgres
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "10"
- python-version: "3.10"
postgres-version: "14"
services:
postgres:
image: postgres:${{ matrix.postgres-version }}
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
complement:
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
shell: bash
run: |
mkdir -p complement
# Attempt to use the version of complement which best matches the current
# build. Depending on whether this is a PR or release, etc. we need to
# use different fallbacks.
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
- run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- check-sampleconfig
- lint
- lint-crlf
- lint-newsfile
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
with:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non PR builds
skippable:
lint-newsfile


@@ -1,116 +0,0 @@
name: Twisted Trunk
on:
schedule:
- cron: 0 8 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:buster
volumes:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
- mypy
- trial
- sytest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/twisted_trunk_build_failed_issue_template.md

.gitignore

@@ -1,62 +1,48 @@
# filename patterns
*~
*.pyc
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock
*.py[cod]
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
_trial_temp/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
# We do want the poetry lockfile.
!poetry.lock
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.pid
homeserver*.yaml
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/
/uploads
*.signing.key
*.tls.crt
*.tls.dh
*.tls.key
# For direnv users
/.envrc
.coverage
htmlcov
# IDEs
/.idea/
/.ropeproject/
/.vscode/
demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc
# build products
!/.coveragerc
/.coverage*
/.mypy_cache/
/.tox
/.tox-pg-container
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/
uploads
# docs
book/
.idea/
media_store/
# complement
/complement-*
/master.tar.gz
*.tac
build/
localhost-800*/
static/client/register/register_config.js
.tox
env/
*.config

.travis.yml (new file)

@@ -0,0 +1,17 @@
sudo: false
language: python
python: 2.7
# tell travis to cache ~/.cache/pip
cache: pip
env:
- TOX_ENV=packaging
- TOX_ENV=pep8
- TOX_ENV=py27
install:
- pip install tox
script:
- tox -e $TOX_ENV


@@ -1,8 +1,34 @@
The following is an incomplete list of people outside the core team who have
contributed to Synapse. It is no longer maintained: more recent contributions
are listed in the `changelog <CHANGES.md>`_.
Erik Johnston <erik at matrix.org>
* HS core
* Federation API impl
----
Mark Haines <mark at matrix.org>
* HS core
* Crypto
* Content repository
* CS v2 API impl
Kegan Dougal <kegan at matrix.org>
* HS core
* CS v1 API impl
* AS API impl
Paul "LeoNerd" Evans <paul at matrix.org>
* HS core
* Presence
* Typing Notifications
* Performance metrics and caching layer
Dave Baker <dave at matrix.org>
* Push notifications
* Auth CS v2 impl
Matthew Hodgson <matthew at matrix.org>
* General doc & housekeeping
* Vertobot/vertobridge matrix<->verto PoC
Emmanuel Rohee <manu at matrix.org>
* Supporting iOS clients (testability and fallback registration)
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
@@ -34,18 +60,3 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix
Joseph Weston <joseph at weston.cloud>
* Add admin API for querying HS version
Benjamin Saunders <ben.e.saunders at gmail dot com>
* Documentation improvements
Werner Sembach <werner.sembach at fau dot de>
* Automatically remove a group/community when it is empty

CHANGES.md

File diff suppressed because it is too large.

CHANGES.rst (new file)

File diff suppressed because it is too large.


@@ -1,3 +0,0 @@
# Welcome to Synapse
Please see the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html) in our rendered documentation.

CONTRIBUTING.rst (new file)

@@ -0,0 +1,118 @@
Contributing code to Matrix
===========================
Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).
How to contribute
~~~~~~~~~~~~~~~~~
The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)
**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**
We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
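In concrete terms, a typical contribution flow looks something like the
following sketch (the fork URL and the ``my-feature`` branch name are
placeholders for your own)::

    # clone your fork and base a feature branch on develop
    git clone https://github.com/<your-username>/synapse.git
    cd synapse
    git checkout develop
    git checkout -b my-feature
    # ...make your changes and commit them...
    git push origin my-feature
    # then open a pull request against the matrix.org develop branch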
We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested by Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org, so please lurk there and keep an eye open.
Code style
~~~~~~~~~~
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Attribution
~~~~~~~~~~~
Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
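An entry follows the format already used in AUTHORS.rst - a name, a lightly
obfuscated email address, and a short description (the details below are
placeholders)::

    Jane Doe <jane at example dot org>
     * Short description of the area(s) you worked on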
Sign off
~~~~~~~~
In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
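For example, a minimal sketch (substitute your own name, address and commit
message)::

    git config user.name "Your Name"
    git config user.email "your@email.example.org"
    git commit -s -m "Fix room alias handling"

The ``-s`` flag appends the ``Signed-off-by:`` line automatically, using the
two configs set above.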
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there, we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!


@@ -1,7 +0,0 @@
# Installation Instructions
This document has moved to the
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
Please update your links.
The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).

MANIFEST.in (new file)

@@ -0,0 +1,30 @@
include synctl
include LICENSE
include VERSION
include *.rst
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py
recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
recursive-exclude jenkins *.sh
prune demo/etc

MAP.rst (new file)

@@ -0,0 +1,35 @@
Directory Structure
===================
Warning: this may be a bit stale...
::
.
├── cmdclient Basic CLI python Matrix client
├── demo Scripts for running standalone Matrix demos
├── docs All doc, including the draft Matrix API spec
│   ├── client-server The client-server Matrix API spec
│   ├── model Domain-specific elements of the Matrix API spec
│   ├── server-server The server-server model of the Matrix API spec
│   └── sphinx The internal API doc of the Synapse homeserver
├── experiments Early experiments of using Synapse's internal APIs
├── graph Visualisation of Matrix's distributed message store
├── synapse The reference Matrix homeserver implementation
│   ├── api Common building blocks for the APIs
│   │   ├── events Definition of state representation Events
│   │   └── streams Definition of streamable Event objects
│   ├── app The __main__ entry point for the homeserver
│   ├── crypto The PKI client/server used for secure federation
│   │   └── resource PKI helper objects (e.g. keys)
│   ├── federation Server-server state replication logic
│   ├── handlers The main business logic of the homeserver
│   ├── http Wrappers around Twisted's HTTP server & client
│   ├── rest Servlet-style RESTful API
│   ├── storage Persistence subsystem (currently only sqlite3)
│   │   └── schema sqlite persistence schema
│   └── util Synapse-specific utilities
├── tests Unit tests for the Synapse homeserver
└── webclient Basic AngularJS Matrix web client

File diff suppressed because it is too large.


@@ -1,7 +1,276 @@
Upgrading Synapse
=================
This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
Please update your links.
Before upgrading check if any special steps are required to upgrade from what
you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.
The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
.. code:: bash
source ~/.synapse/bin/activate
If synapse was installed using pip then upgrade to the latest version by
running:
.. code:: bash
pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
If synapse was installed using git then upgrade to the latest version by
running:
.. code:: bash
# Pull the latest version of the master branch.
git pull
# Update the versions of synapse's python dependencies.
python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
Upgrading to v0.15.0
====================
If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.
Upgrading to v0.11.0
====================
This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
Upgrading to v0.9.0
===================
Application services have had a breaking API change in this version.
They can no longer register themselves with a home server using the AS HTTP API. This
decision was made because a compromised application service with free rein to register
any regex in effect grants full read/write access to the home server if a regex of ``.*``
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
big of a security risk to ignore, and so the ability to register with the HS remotely has
been removed.
It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::
app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
Where ``registration-01.yaml`` looks like::
url: <String> # e.g. "https://my.application.service.com"
as_token: <String>
hs_token: <String>
sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
namespaces:
users:
- exclusive: <Boolean>
regex: <String> # e.g. "@prefix_.*"
aliases:
- exclusive: <Boolean>
regex: <String>
rooms:
- exclusive: <Boolean>
regex: <String>
Upgrading to v0.8.0
===================
Servers which use captchas will need to add their public key to::
static/client/register/register_config.js
window.matrixRegistrationConfig = {
recaptcha_public_key: "YOUR_PUBLIC_KEY"
};
This is required in order to support registration fallback (typically used on
mobile devices).
Upgrading to v0.7.0
===================
New dependencies are:
- pydenticon
- simplejson
- syutil
- matrix-angular-sdk
To pull in these dependencies in a virtual env, run::
python synapse/python_dependencies.py | xargs -n 1 pip install
Upgrading to v0.6.0
===================
To pull in new dependencies, run::
python setup.py develop --user
This update includes a change to the database schema. To upgrade you first need
to upgrade the database by running::
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
Where `<db>` is the location of the database, `<server_name>` is the
server name as specified in the synapse configuration, and `<signing_key>` is
the location of the signing key as specified in the synapse configuration.
This may take some time to complete. Failures of signatures and content hashes
can safely be ignored.
Upgrading to v0.5.1
===================
Depending on precisely when you installed v0.5.0 you may have ended up with
a stale release of the reference matrix webclient installed as a python module.
To uninstall it and ensure you are depending on the latest module, please run::
$ pip uninstall syweb
Upgrading to v0.5.0
===================
The webclient has been split out into a separate repository/package in this
release. Before you restart your homeserver you will need to pull in the
webclient package by running::
python setup.py develop --user
This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.
If you would like to keep your history, please take a copy of your database
file and ask for help in #matrix:matrix.org. The upgrade process is,
unfortunately, non-trivial and requires human intervention to resolve any
resulting conflicts during the upgrade process.
Before running the command the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.:
./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can rejoin remote rooms either by using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.
Upgrading to v0.4.0
===================
This release needs an updated syutil version. Run::
python setup.py develop
You will also need to upgrade your configuration as the signing key format has
changed. Run::
python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
Upgrading to v0.3.0
===================
The registration API now closely matches the login API. This introduces a bit
more back and forth between the HS and the client, but it improves the overall
flexibility of the API. You can now GET on /register to retrieve a list
of valid registration flows. Upon choosing one, it is submitted in the same
way as login, e.g.::
  {
    "type": "m.login.password",
    "user": "foo",
    "password": "bar"
  }
The default HS supports 2 flows, with and without Identity Server email
authentication. Enabling captcha on the HS will add an extra step to all
flows: ``m.login.recaptcha``, which must be completed before you can transition
to the next stage. There is a new login type, ``m.login.email.identity``, which
contains the ``threepidCreds`` key that was previously sent in the original
register request. For more information on this, see the specification.
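For illustration, the flow list returned by ``GET /register`` on a captcha-enabled
HS might look something like the following (the exact flows depend on the server's
configuration, so treat this shape as an assumption based on the description
above)::

  {
    "flows": [
      {
        "type": "m.login.recaptcha",
        "stages": ["m.login.recaptcha", "m.login.password"]
      },
      {
        "type": "m.login.email.identity",
        "stages": ["m.login.email.identity", "m.login.password"]
      }
    ]
  }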
Web Client
----------
The VoIP specification has changed between v0.2.0 and v0.3.0. Users should
refresh any browser tabs to get the latest web client code. Users on
v0.2.0 of the web client will not be able to call those on v0.3.0 and
vice versa.
Upgrading to v0.2.0
===================
The home server now requires SSL config to be set up before it can run. To
automatically generate a default config, use::
$ python synapse/app/homeserver.py \
--server-name machine.my.domain.name \
--bind-port 8448 \
--config-path homeserver.config \
--generate-config
This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done, you can run the home server using::
$ python synapse/app/homeserver.py --config-path homeserver.config
See the README.rst for more information.
Also note that some config options have been renamed, including:
- "host" to "server-name"
- "database" to "database-path"
- "port" to "bind-port" and "unsecure-port"
Upgrading to v0.0.1
===================
This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.
Before running the command, the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.::
./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
Once this has successfully completed, it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can rejoin remote rooms either by using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.

View File

@@ -1,39 +0,0 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false
# The directory that documentation files are stored in
src = "docs"
[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false
[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true
# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"
# The path that the docs are hosted on
site-url = "/synapse/"
# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"

View File

@@ -1 +0,0 @@
!.gitignore

View File

@@ -1 +0,0 @@
Improve event caching mechanism to avoid having multiple copies of an event in memory at a time.

View File

@@ -1 +0,0 @@
Add some type hints to datastore.

View File

@@ -1 +0,0 @@
Measure the time taken in spam-checking callbacks and expose those measurements as metrics.

View File

@@ -1 +0,0 @@
Replace string literal instances of stream key types with typed constants.

View File

@@ -1 +0,0 @@
Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.

View File

@@ -1 +0,0 @@
Add ability to cancel disconnected requests to `SynapseRequest`.

View File

@@ -1 +0,0 @@
Add a `default_power_level_content_override` config option to set default room power levels per room preset.

View File

@@ -1 +0,0 @@
Add a helper class for testing request cancellation.

View File

@@ -1 +0,0 @@
Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal.

View File

@@ -1 +0,0 @@
Improve documentation of the `synapse.push` module.

View File

@@ -1 +0,0 @@
Refactor functions on `PushRuleEvaluatorForEvent`.

View File

@@ -1 +0,0 @@
Preparation for database schema simplifications: stop writing to `event_reference_hashes`.

View File

@@ -1 +0,0 @@
Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room.

View File

@@ -1 +0,0 @@
Refactor `EventContext` class.

View File

@@ -1 +0,0 @@
Remove an unneeded class in the push code.

View File

@@ -1 +0,0 @@
Consolidate parsing of relation information from events.

View File

@@ -1 +0,0 @@
Capture the `Deferred` for request cancellation in `_AsyncResource`.

View File

@@ -1 +0,0 @@
Fix an incorrect type hint for `Filter._check_event_relations`.

View File

@@ -1 +0,0 @@
Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room.

View File

@@ -1 +0,0 @@
Respect the `@cancellable` flag for `DirectServe{Html,Json}Resource`s.

View File

@@ -1 +0,0 @@
Respect the `@cancellable` flag for `RestServlet`s and `BaseFederationServlet`s.

View File

@@ -1 +0,0 @@
Respect the `@cancellable` flag for `ReplicationEndpoint`s.

View File

@@ -1 +0,0 @@
Add a config option to allow for auto-tuning of caches.

View File

@@ -1 +0,0 @@
Complain if a federation endpoint has the `@cancellable` flag, since some of the wrapper code may not handle cancellation correctly yet.

View File

@@ -1 +0,0 @@
Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests.

View File

@@ -1 +0,0 @@
Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts.

View File

@@ -1 +0,0 @@
Optimize private read receipt filtering.

View File

@@ -1 +0,0 @@
Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set.

View File

@@ -1 +0,0 @@
Fix a typo in the Media Admin API documentation.

View File

@@ -1 +0,0 @@
Add type annotations to increase the number of modules passing `disallow-untyped-defs`.

View File

@@ -1 +0,0 @@
Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG.

View File

@@ -1 +0,0 @@
Add type annotations to increase the number of modules passing `disallow-untyped-defs`.

View File

@@ -1 +0,0 @@
Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh.

View File

@@ -1 +0,0 @@
Update configs used by Complement to allow more invites/3PID validations during tests.

View File

@@ -1 +0,0 @@
Tidy up and type-hint the database engine modules.

View File

@@ -1 +0,0 @@
Fix typo in server listener documentation.

View File

@@ -1 +0,0 @@
Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers.

View File

@@ -1 +0,0 @@
Fix typo in 'run_background_tasks_on' option name in configuration manual documentation.

View File

@@ -1 +0,0 @@
Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar.

View File

@@ -1,10 +0,0 @@
Community Contributions
=======================
Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.
Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in the future. If you wish to use any of these files,
copy them first so that they cannot break from underneath you.

View File

@@ -15,6 +15,10 @@
# limitations under the License.
""" Starts a synapse client console. """
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient
import argparse
import cmd
import getpass
@@ -23,22 +27,18 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
from typing import Optional
import nacl.encoding
import nacl.signing
import urlparse
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
# TODO: The concept of trusted identity servers has been deprecated. This option and checks
# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]
TRUSTED_ID_SERVERS = [
'localhost:8001'
]
class SynapseCmd(cmd.Cmd):
@@ -58,7 +58,7 @@ class SynapseCmd(cmd.Cmd):
"token": token,
"verbose": "on",
"complete_usernames": "on",
"send_delivery_receipts": "on",
"send_delivery_receipts": "on"
}
self.path_prefix = "/_matrix/client/api/v1"
self.event_stream_token = "END"
@@ -93,7 +93,7 @@ class SynapseCmd(cmd.Cmd):
return self.config["user"].split(":")[1]
def do_config(self, line):
"""Show the config for this client: "config"
""" Show the config for this client: "config"
Edit a key value mapping: "config key value" e.g. "config token 1234"
Config variables:
user: The username to auth with.
@@ -109,7 +109,7 @@ class SynapseCmd(cmd.Cmd):
by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
"""
if len(line) == 0:
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
return
try:
@@ -119,11 +119,12 @@ class SynapseCmd(cmd.Cmd):
config_rules = [ # key, valid_values
("verbose", ["on", "off"]),
("complete_usernames", ["on", "off"]),
("send_delivery_receipts", ["on", "off"]),
("send_delivery_receipts", ["on", "off"])
]
for key, valid_vals in config_rules:
if key == args["key"] and args["val"] not in valid_vals:
print("%s value must be one of %s" % (args["key"], valid_vals))
print "%s value must be one of %s" % (args["key"],
valid_vals)
return
# toggle the http client verbosity
@@ -132,11 +133,11 @@ class SynapseCmd(cmd.Cmd):
# assign the new config
self.config[args["key"]] = args["val"]
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
save_config(self.config)
except Exception as e:
print(e)
print e
def do_register(self, line):
"""Registers for a new account: "register <userid> <noupdate>"
@@ -152,32 +153,33 @@ class SynapseCmd(cmd.Cmd):
pwd = getpass.getpass("Type a password for this user: ")
pwd2 = getpass.getpass("Retype the password: ")
if pwd != pwd2 or len(pwd) == 0:
print("Password mismatch.")
print "Password mismatch."
pwd = None
else:
password = pwd
body = {"type": "m.login.password"}
body = {
"type": "m.login.password"
}
if "userid" in args:
body["user"] = args["userid"]
if password:
body["password"] = password
reactor.callFromThread(self._do_register, body, "noupdate" not in args)
reactor.callFromThread(self._do_register, body,
"noupdate" not in args)
@defer.inlineCallbacks
def _do_register(self, data, update_config):
# check the registration flows
url = self._url() + "/register"
json_res = yield self.http_client.do_request("GET", url)
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
passwordFlow = None
for flow in json_res["flows"]:
if flow["type"] == "m.login.recaptcha" or (
"stages" in flow and "m.login.recaptcha" in flow["stages"]
):
print("Unable to register: Home server requires captcha.")
if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
print "Unable to register: Home server requires captcha."
return
if flow["type"] == "m.login.password" and "stages" not in flow:
passwordFlow = flow
@@ -187,7 +189,7 @@ class SynapseCmd(cmd.Cmd):
return
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
if update_config and "user_id" in json_res:
self.config["user"] = json_res["user_id"]
self.config["token"] = json_res["access_token"]
@@ -199,7 +201,9 @@ class SynapseCmd(cmd.Cmd):
"""
try:
args = self._parse(line, ["user_id"], force_keys=True)
can_login = threads.blockingCallFromThread(reactor, self._check_can_login)
can_login = threads.blockingCallFromThread(
reactor,
self._check_can_login)
if can_login:
p = getpass.getpass("Enter your password: ")
user = args["user_id"]
@@ -207,25 +211,29 @@ class SynapseCmd(cmd.Cmd):
domain = self._domain()
if domain:
user = "@" + user + ":" + domain
reactor.callFromThread(self._do_login, user, p)
# print " got %s " % p
#print " got %s " % p
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_login(self, user, password):
path = "/login"
data = {"user": user, "password": password, "type": "m.login.password"}
data = {
"user": user,
"password": password,
"type": "m.login.password"
}
url = self._url() + path
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json_res)
print json_res
if "access_token" in json_res:
self.config["user"] = user
self.config["token"] = json_res["access_token"]
save_config(self.config)
print("Login successful.")
print "Login successful."
@defer.inlineCallbacks
def _check_can_login(self):
@@ -234,19 +242,18 @@ class SynapseCmd(cmd.Cmd):
# submitting!
url = self._url() + path
json_res = yield self.http_client.do_request("GET", url)
print(json_res)
print json_res
if "flows" not in json_res:
print("Failed to find any login flows.")
print "Failed to find any login flows."
defer.returnValue(False)
flow = json_res["flows"][0] # assume first is the one we want.
if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
flow = json_res["flows"][0] # assume first is the one we want.
if ("type" not in flow or "m.login.password" != flow["type"] or
"stages" in flow):
fallback_url = self._url() + "/login/fallback"
print(
"Unable to login via the command line client. Please visit "
"%s to login." % fallback_url
)
print ("Unable to login via the command line client. Please visit "
"%s to login." % fallback_url)
defer.returnValue(False)
defer.returnValue(True)
@@ -256,34 +263,21 @@ class SynapseCmd(cmd.Cmd):
<clientSecret> A string of characters generated when requesting an email that you'll supply in subsequent calls to identify yourself
<sendAttempt> The number of times the user has requested an email. Leave this the same between requests to retry the request at the transport level. Increment it to request that the email be sent again.
"""
args = self._parse(line, ["address", "clientSecret", "sendAttempt"])
args = self._parse(line, ['address', 'clientSecret', 'sendAttempt'])
postArgs = {
"email": args["address"],
"clientSecret": args["clientSecret"],
"sendAttempt": args["sendAttempt"],
}
postArgs = {'email': args['address'], 'clientSecret': args['clientSecret'], 'sendAttempt': args['sendAttempt']}
reactor.callFromThread(self._do_emailrequest, postArgs)
@defer.inlineCallbacks
def _do_emailrequest(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/requestToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/requestToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
if "sid" in json_res:
print("Token sent. Your session ID is %s" % (json_res["sid"]))
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
if 'sid' in json_res:
print "Token sent. Your session ID is %s" % (json_res['sid'])
def do_emailvalidate(self, line):
"""Validate and associate a third party ID
@@ -291,58 +285,39 @@ class SynapseCmd(cmd.Cmd):
<token> The token sent to your third party identifier address
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "token", "clientSecret"])
args = self._parse(line, ['sid', 'token', 'clientSecret'])
postArgs = {
"sid": args["sid"],
"token": args["token"],
"clientSecret": args["clientSecret"],
}
postArgs = { 'sid' : args['sid'], 'token' : args['token'], 'clientSecret': args['clientSecret'] }
reactor.callFromThread(self._do_emailvalidate, postArgs)
@defer.inlineCallbacks
def _do_emailvalidate(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/submitToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/submitToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_3pidbind(self, line):
"""Validate and associate a third party ID
<sid> The session ID (sid) given to you in the response to requestToken
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "clientSecret"])
args = self._parse(line, ['sid', 'clientSecret'])
postArgs = {"sid": args["sid"], "clientSecret": args["clientSecret"]}
postArgs["mxid"] = self.config["user"]
postArgs = { 'sid' : args['sid'], 'clientSecret': args['clientSecret'] }
postArgs['mxid'] = self.config["user"]
reactor.callFromThread(self._do_3pidbind, postArgs)
@defer.inlineCallbacks
def _do_3pidbind(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
url = self._identityServerUrl()+"/_matrix/identity/api/v1/3pid/bind"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_join(self, line):
"""Joins a room: "join <roomid>" """
@@ -350,7 +325,7 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "join", self._usr())
except Exception as e:
print(e)
print e
def do_joinalias(self, line):
try:
@@ -358,34 +333,36 @@ class SynapseCmd(cmd.Cmd):
path = "/join/%s" % urllib.quote(args["roomname"])
reactor.callFromThread(self._run_and_pprint, "POST", path, {})
except Exception as e:
print(e)
print e
def do_topic(self, line):
""" "topic [set|get] <roomid> [<newtopic>]"
""""topic [set|get] <roomid> [<newtopic>]"
Set the topic for a room: topic set <roomid> <newtopic>
Get the topic for a room: topic get <roomid>
"""
try:
args = self._parse(line, ["action", "roomid", "topic"])
if "action" not in args or "roomid" not in args:
print("Must specify set|get and a room ID.")
print "Must specify set|get and a room ID."
return
if args["action"].lower() not in ["set", "get"]:
print("Must specify set|get, not %s" % args["action"])
print "Must specify set|get, not %s" % args["action"]
return
path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
if args["action"].lower() == "set":
if "topic" not in args:
print("Must specify a new topic.")
print "Must specify a new topic."
return
body = {"topic": args["topic"]}
body = {
"topic": args["topic"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body)
elif args["action"].lower() == "get":
reactor.callFromThread(self._run_and_pprint, "GET", path)
except Exception as e:
print(e)
print e
def do_invite(self, line):
"""Invite a user to a room: "invite <userid> <roomid>" """
@@ -396,66 +373,49 @@ class SynapseCmd(cmd.Cmd):
reactor.callFromThread(self._do_invite, args["roomid"], user_id)
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_invite(self, roomid, userstring):
if not userstring.startswith("@") and self._is_on("complete_usernames"):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
if (not userstring.startswith('@') and
self._is_on("complete_usernames")):
url = self._identityServerUrl()+"/_matrix/identity/api/v1/lookup"
json_res = yield self.http_client.do_request(
"GET", url, qparams={"medium": "email", "address": userstring}
)
json_res = yield self.http_client.do_request("GET", url, qparams={'medium':'email','address':userstring})
mxid = None
if "mxid" in json_res and "signatures" in json_res:
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/pubkey/ed25519"
)
if 'mxid' in json_res and 'signatures' in json_res:
url = self._identityServerUrl()+"/_matrix/identity/api/v1/pubkey/ed25519"
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
pubKey = nacl.signing.VerifyKey(
pubKeyObj["public_key"], encoder=nacl.encoding.HexEncoder
)
if 'public_key' in pubKeyObj:
pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
else:
print("No public key found in pubkey response!")
print "No public key found in pubkey response!"
sigValid = False
if pubKey:
for signame in json_res["signatures"]:
for signame in json_res['signatures']:
if signame not in TRUSTED_ID_SERVERS:
print(
"Ignoring signature from untrusted server %s"
% (signame)
)
print "Ignoring signature from untrusted server %s" % (signame)
else:
try:
verify_signed_json(json_res, signame, pubKey)
sigValid = True
print(
"Mapping %s -> %s correctly signed by %s"
% (userstring, json_res["mxid"], signame)
)
print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
break
except SignatureVerifyException as e:
print("Invalid signature from %s" % (signame))
print(e)
print "Invalid signature from %s" % (signame)
print e
if sigValid:
print("Resolved 3pid %s to %s" % (userstring, json_res["mxid"]))
mxid = json_res["mxid"]
print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
mxid = json_res['mxid']
else:
print(
"Got association for %s but couldn't verify signature"
% (userstring)
)
print "Got association for %s but couldn't verify signature" % (userstring)
if not mxid:
mxid = "@" + userstring + ":" + self._domain()
@@ -468,17 +428,18 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "leave", self._usr())
except Exception as e:
print(e)
print e
def do_send(self, line):
"""Sends a message. "send <roomid> <body>" """
args = self._parse(line, ["roomid", "body"])
txn_id = "txn%s" % int(time.time())
path = "/rooms/%s/send/m.room.message/%s" % (
urllib.quote(args["roomid"]),
txn_id,
)
body_json = {"msgtype": "m.text", "body": args["body"]}
path = "/rooms/%s/send/m.room.message/%s" % (urllib.quote(args["roomid"]),
txn_id)
body_json = {
"msgtype": "m.text",
"body": args["body"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body_json)
def do_list(self, line):
@@ -491,11 +452,11 @@ class SynapseCmd(cmd.Cmd):
"list messages <roomid> from=END&to=START&limit=3"
"""
args = self._parse(line, ["type", "roomid", "qp"])
if "type" not in args or "roomid" not in args:
print("Must specify type and room ID.")
if not "type" in args or not "roomid" in args:
print "Must specify type and room ID."
return
if args["type"] not in ["members", "messages"]:
print("Unrecognised type: %s" % args["type"])
print "Unrecognised type: %s" % args["type"]
return
room_id = args["roomid"]
path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -506,11 +467,12 @@ class SynapseCmd(cmd.Cmd):
try:
key_value = key_value_str.split("=")
qp[key_value[0]] = key_value[1]
except Exception:
print("Bad query param: %s" % key_value)
except:
print "Bad query param: %s" % key_value
return
reactor.callFromThread(self._run_and_pprint, "GET", path, query_params=qp)
reactor.callFromThread(self._run_and_pprint, "GET", path,
query_params=qp)
def do_create(self, line):
"""Creates a room.
@@ -546,22 +508,14 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["method", "path", "data"])
# sanity check
if "method" not in args or "path" not in args:
print("Must specify path and method.")
print "Must specify path and method."
return
args["method"] = args["method"].upper()
valid_methods = [
"PUT",
"GET",
"POST",
"DELETE",
"XPUT",
"XGET",
"XPOST",
"XDELETE",
]
valid_methods = ["PUT", "GET", "POST", "DELETE",
"XPUT", "XGET", "XPOST", "XDELETE"]
if args["method"] not in valid_methods:
print("Unsupported method: %s" % args["method"])
print "Unsupported method: %s" % args["method"]
return
if "data" not in args:
@@ -570,7 +524,7 @@ class SynapseCmd(cmd.Cmd):
try:
args["data"] = json.loads(args["data"])
except Exception as e:
print("Data is not valid JSON. %s" % e)
print "Data is not valid JSON. %s" % e
return
qp = {"access_token": self._tok()}
@@ -583,16 +537,13 @@ class SynapseCmd(cmd.Cmd):
parsed_url = urlparse.urlparse(args["path"])
qp.update(urlparse.parse_qs(parsed_url.query))
args["path"] = parsed_url.path
except Exception:
except:
pass
reactor.callFromThread(
self._run_and_pprint,
args["method"],
args["path"],
args["data"],
query_params=qp,
)
reactor.callFromThread(self._run_and_pprint, args["method"],
args["path"],
args["data"],
query_params=qp)
def do_stream(self, line):
"""Stream data from the server: "stream <longpoll timeout ms>" """
@@ -602,31 +553,26 @@ class SynapseCmd(cmd.Cmd):
try:
timeout = int(args["timeout"])
except ValueError:
print("Timeout must be in milliseconds.")
print "Timeout must be in milliseconds."
return
reactor.callFromThread(self._do_event_stream, timeout)
@defer.inlineCallbacks
def _do_event_stream(self, timeout):
res = yield defer.ensureDeferred(
self.http_client.get_json(
res = yield self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
)
print(json.dumps(res, indent=4))
"from": self.event_stream_token
})
print json.dumps(res, indent=4)
if "chunk" in res:
for event in res["chunk"]:
if (
event["type"] == "m.room.message"
and self._is_on("send_delivery_receipts")
and event["user_id"] != self._usr()
): # not sent by us
if (event["type"] == "m.room.message" and
self._is_on("send_delivery_receipts") and
event["user_id"] != self._usr()): # not sent by us
self._send_receipt(event, "d")
# update the position in the stream
@@ -634,28 +580,18 @@ class SynapseCmd(cmd.Cmd):
self.event_stream_token = res["end"]
def _send_receipt(self, event, feedback_type):
path = "/rooms/%s/messages/%s/%s/feedback/%s/%s" % (
urllib.quote(event["room_id"]),
event["user_id"],
event["msg_id"],
self._usr(),
feedback_type,
)
path = ("/rooms/%s/messages/%s/%s/feedback/%s/%s" %
(urllib.quote(event["room_id"]), event["user_id"], event["msg_id"],
self._usr(), feedback_type))
data = {}
reactor.callFromThread(
self._run_and_pprint,
"PUT",
path,
data=data,
alt_text="Sent receipt for %s" % event["msg_id"],
)
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data,
alt_text="Sent receipt for %s" % event["msg_id"])
def _do_membership_change(self, roomid, membership, userid):
path = "/rooms/%s/state/m.room.member/%s" % (
urllib.quote(roomid),
urllib.quote(userid),
)
data = {"membership": membership}
path = "/rooms/%s/state/m.room.member/%s" % (urllib.quote(roomid), urllib.quote(userid))
data = {
"membership": membership
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
def do_displayname(self, line):
@@ -691,7 +627,7 @@ class SynapseCmd(cmd.Cmd):
self._do_presence_state(2, line)
def _parse(self, line, keys, force_keys=False):
"""Parses the given line.
""" Parses the given line.
Args:
line : The line to parse
@@ -708,21 +644,16 @@ class SynapseCmd(cmd.Cmd):
for i, arg in enumerate(line_args):
for config_key in self.config:
if ("$" + config_key) in arg:
arg = arg.replace("$" + config_key, self.config[config_key])
arg = arg.replace("$" + config_key,
self.config[config_key])
line_args[i] = arg
return dict(zip(keys, line_args))
@defer.inlineCallbacks
def _run_and_pprint(
self,
method,
path,
data=None,
query_params: Optional[dict] = None,
alt_text=None,
):
"""Runs an HTTP request and pretty prints the output.
def _run_and_pprint(self, method, path, data=None,
query_params={"access_token": None}, alt_text=None):
""" Runs an HTTP request and pretty prints the output.
Args:
method: HTTP method
@@ -730,37 +661,35 @@ class SynapseCmd(cmd.Cmd):
data: Raw JSON data if any
query_params: dict of query parameters to add to the url
"""
query_params = query_params or {"access_token": None}
url = self._url() + path
if "access_token" in query_params:
query_params["access_token"] = self._tok()
json_res = yield self.http_client.do_request(
method, url, data=data, qparams=query_params
)
json_res = yield self.http_client.do_request(method, url,
data=data,
qparams=query_params)
if alt_text:
print(alt_text)
print alt_text
else:
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
def save_config(config):
with open(CONFIG_JSON, "w") as out:
with open(CONFIG_JSON, 'w') as out:
json.dump(config, out)
def main(server_url, identity_server_url, username, token, config_path):
print("Synapse command line client")
print("===========================")
print("Server: %s" % server_url)
print("Type 'help' to get started.")
print("Close this console with CTRL+C then CTRL+D.")
print "Synapse command line client"
print "==========================="
print "Server: %s" % server_url
print "Type 'help' to get started."
print "Close this console with CTRL+C then CTRL+D."
if not username or not token:
print("- 'register <username>' - Register an account")
print("- 'stream' - Connect to the event stream")
print("- 'create <roomid>' - Create a room")
print("- 'send <roomid> <message>' - Send a message")
print "- 'register <username>' - Register an account"
print "- 'stream' - Connect to the event stream"
print "- 'create <roomid>' - Create a room"
print "- 'send <roomid> <message>' - Send a message"
http_client = TwistedHttpClient()
# the command line client
@@ -770,14 +699,14 @@ def main(server_url, identity_server_url, username, token, config_path):
global CONFIG_JSON
CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
try:
with open(config_path, "r") as config:
with open(config_path, 'r') as config:
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
except Exception:
except:
pass
print("Loaded config from %s" % config_path)
except Exception:
print "Loaded config from %s" % config_path
except:
pass
# Twisted-specific: Runs the command processor in Twisted's event loop
@@ -787,37 +716,27 @@ def main(server_url, identity_server_url, username, token, config_path):
reactor.run()
if __name__ == "__main__":
if __name__ == '__main__':
parser = argparse.ArgumentParser("Starts a synapse client.")
parser.add_argument(
"-s",
"--server",
dest="server",
default="http://localhost:8008",
help="The URL of the home server to talk to.",
)
"-s", "--server", dest="server", default="http://localhost:8008",
help="The URL of the home server to talk to.")
parser.add_argument(
"-i",
"--identity-server",
dest="identityserver",
default="http://localhost:8090",
help="The URL of the identity server to talk to.",
)
"-i", "--identity-server", dest="identityserver", default="http://localhost:8090",
help="The URL of the identity server to talk to.")
parser.add_argument(
"-u", "--username", dest="username", help="Your username on the server."
)
parser.add_argument("-t", "--token", dest="token", help="Your access token.")
"-u", "--username", dest="username",
help="Your username on the server.")
parser.add_argument(
"-c",
"--config",
dest="config",
default=CONFIG_JSON,
help="The location of the config.json file to read from.",
)
"-t", "--token", dest="token",
help="Your access token.")
parser.add_argument(
"-c", "--config", dest="config", default=CONFIG_JSON,
help="The location of the config.json file to read from.")
args = parser.parse_args()
if not args.server:
print("You must supply a server URL to communicate with.")
print "You must supply a server URL to communicate with."
parser.print_help()
sys.exit(1)

View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,21 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from pprint import pformat
from typing import Optional
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor
from pprint import pformat
import json
import urllib
class HttpClient:
"""Interface for talking json over http"""
class HttpClient(object):
""" Interface for talking json over http
"""
def put_json(self, url, data):
"""Sends the specifed json data using PUT
""" Sends the specifed json data using PUT
Args:
url (str): The URL to PUT data to.
@@ -34,13 +36,15 @@ class HttpClient:
the request body. This will be encoded as JSON.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
""" Get's some json from the given host homeserver and path
Args:
url (str): The URL to GET data from.
@@ -50,14 +54,16 @@ class HttpClient:
and *not* a string.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
Deferred: Succeeds when we get *any* HTTP response.
The result of the deferred is a tuple of `(code, response)`,
where `response` is a dict representing the decoded JSON body.
"""
pass
class TwistedHttpClient(HttpClient):
"""Wrapper around the twisted HTTP client api.
""" Wrapper around the twisted HTTP client api.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -70,7 +76,9 @@ class TwistedHttpClient(HttpClient):
@defer.inlineCallbacks
def put_json(self, url, data):
response = yield self._create_put_request(
url, data, headers_dict={"Content-Type": ["application/json"]}
url,
data,
headers_dict={"Content-Type": ["application/json"]}
)
body = yield readBody(response)
defer.returnValue((response.code, body))
@@ -85,46 +93,45 @@ class TwistedHttpClient(HttpClient):
body = yield readBody(response)
defer.returnValue(json.loads(body))
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a PUT request"""
headers_dict = headers_dict or {}
def _create_put_request(self, url, json_data, headers_dict={}):
""" Wrapper of _create_request to issue a PUT request
"""
if "Content-Type" not in headers_dict:
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
raise defer.error(
RuntimeError("Must include Content-Type header for PUTs"))
return self._create_request(
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
"PUT",
url,
producer=_JsonProducer(json_data),
headers_dict=headers_dict
)
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a GET request"""
return self._create_request("GET", url, headers_dict=headers_dict or {})
def _create_get_request(self, url, headers_dict={}):
""" Wrapper of _create_request to issue a GET request
"""
return self._create_request(
"GET",
url,
headers_dict=headers_dict
)
@defer.inlineCallbacks
def do_request(
self,
method,
url,
data=None,
qparams=None,
jsonreq=True,
headers: Optional[dict] = None,
):
headers = headers or {}
def do_request(self, method, url, data=None, qparams=None, jsonreq=True, headers={}):
if qparams:
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
if jsonreq:
prod = _JsonProducer(data)
headers["Content-Type"] = ["application/json"]
headers['Content-Type'] = ["application/json"];
else:
prod = _RawProducer(data)
if method in ["POST", "PUT"]:
response = yield self._create_request(
method, url, producer=prod, headers_dict=headers
)
response = yield self._create_request(method, url,
producer=prod,
headers_dict=headers)
else:
response = yield self._create_request(method, url)
@@ -132,33 +139,33 @@ class TwistedHttpClient(HttpClient):
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def _create_request(
self, method, url, producer=None, headers_dict: Optional[dict] = None
):
"""Creates and sends a request to the given url"""
headers_dict = headers_dict or {}
def _create_request(self, method, url, producer=None, headers_dict={}):
""" Creates and sends a request to the given url
"""
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
retries_left = 5
print("%s to %s with headers %s" % (method, url, headers_dict))
print "%s to %s with headers %s" % (method, url, headers_dict)
if self.verbose and producer:
if "password" in producer.data:
temp = producer.data["password"]
producer.data["password"] = "[REDACTED]"
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
producer.data["password"] = temp
else:
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
while True:
try:
response = yield self.agent.request(
method, url.encode("UTF8"), Headers(headers_dict), producer
method,
url.encode("UTF8"),
Headers(headers_dict),
producer
)
break
except Exception as e:
print("uh oh: %s" % e)
print "uh oh: %s" % e
if retries_left:
yield self.sleep(2 ** (5 - retries_left))
retries_left -= 1
@@ -166,8 +173,8 @@ class TwistedHttpClient(HttpClient):
raise e
if self.verbose:
print("Status %s %s" % (response.code, response.phrase))
print(pformat(list(response.headers.getAllRawHeaders())))
print "Status %s %s" % (response.code, response.phrase)
print pformat(list(response.headers.getAllRawHeaders()))
defer.returnValue(response)
def sleep(self, seconds):
@@ -175,8 +182,7 @@ class TwistedHttpClient(HttpClient):
reactor.callLater(seconds, d.callback, seconds)
return d
class _RawProducer:
class _RawProducer(object):
def __init__(self, data):
self.data = data
self.body = data
@@ -192,10 +198,9 @@ class _RawProducer:
def stopProducing(self):
pass
class _JsonProducer:
"""Used by the twisted http client to create the HTTP body from json"""
class _JsonProducer(object):
""" Used by the twisted http client to create the HTTP body from json
"""
def __init__(self, jsn):
self.data = jsn
self.body = json.dumps(jsn).encode("utf8")
@@ -209,4 +214,4 @@ class _JsonProducer:
pass
def stopProducing(self):
pass
pass

View File

@@ -1,32 +0,0 @@
# Synapse Docker
### Configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is only an
example; please comment/uncomment the sections that are not suitable for your
use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration.
To generate a fresh `homeserver.yaml`, you can use the `generate` command.
(See the [documentation](../../docker/README.md#generating-a-configuration-file)
for more information.) You will need to specify appropriate values for at least the
`SYNAPSE_SERVER_NAME` and `SYNAPSE_REPORT_STATS` environment variables. For example:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host -e SYNAPSE_REPORT_STATS=yes synapse generate
```
(This will also generate necessary signing keys.)
Then, customize your configuration and run the server:
```
docker-compose up -d
```
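As a usage note, ordinary Compose commands apply to this setup; for example, to
follow the container logs while the server starts (plain docker-compose usage,
nothing Synapse-specific):

```
docker-compose logs -f synapse
```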
### More information
For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)

View File

@@ -1,66 +0,0 @@
# This compose file is compatible with Compose itself; it might need some
# adjustments to run properly with `docker stack`.
version: '3'
services:
synapse:
build:
context: ../..
dockerfile: docker/Dockerfile
image: docker.io/matrixdotorg/synapse:latest
# Since synapse does not retry connecting to the database, restart upon
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres; it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
volumes:
# You may either store all the files in a local folder
- ./files:/data
# .. or you may split this between different storage points
# - ./files:/data
# - /path/to/ssd:/data/uploads
# - /path/to/large_hdd:/data/media
depends_on:
- db
# In order to expose Synapse, remove one of the following; you might, for
# instance, expose the TLS port directly:
ports:
- 8448:8448/tcp
# ... or use a reverse proxy, here is an example for traefik:
labels:
# The following lines are valid for Traefik version 1.x:
- traefik.enable=true
- traefik.frontend.rule=Host:my.matrix.Host
- traefik.port=8008
# Alternatively, for Traefik version 2.0:
- traefik.enable=true
- traefik.http.routers.http-synapse.entryPoints=http
- traefik.http.routers.http-synapse.rule=Host(`my.matrix.host`)
- traefik.http.middlewares.https_redirect.redirectscheme.scheme=https
- traefik.http.middlewares.https_redirect.redirectscheme.permanent=true
- traefik.http.routers.http-synapse.middlewares=https_redirect
- traefik.http.routers.https-synapse.entryPoints=https
- traefik.http.routers.https-synapse.rule=Host(`my.matrix.host`)
- traefik.http.routers.https-synapse.service=synapse
- traefik.http.routers.https-synapse.tls=true
- traefik.http.services.synapse.loadbalancer.server.port=8008
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:12-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data
# .. or store them on some high performance storage for better results
# - /path/to/ssd/storage:/var/lib/postgresql/data

View File

@@ -1,51 +0,0 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.
version: 1
formatters:
fmt:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
# example output to console
console:
class: logging.StreamHandler
formatter: fmt
filters: [context]
# example output to file - to enable, edit 'root' config below.
file:
class: logging.handlers.RotatingFileHandler
formatter: fmt
filename: /var/log/synapse/homeserver.log
maxBytes: 100000000
backupCount: 3
filters: [context]
encoding: utf8
root:
level: INFO
handlers: [console] # to use file handler instead, switch to [file]
loggers:
synapse:
level: INFO
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
# example of enabling debugging for a component:
#
# synapse.federation.transport.server:
# level: DEBUG

View File

@@ -19,13 +19,13 @@ from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
class CursesStdIO():
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.searchText = ''
self.stdscr = stdscr
self.logLine = ""
self.logLine = ''
self.callback = callback
@@ -46,14 +46,14 @@ class CursesStdIO:
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
""" We want to select on FD 0 """
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
""" add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
@@ -63,14 +63,16 @@ class CursesStdIO:
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
""" method for redisplaying lines
based on internal list of lines """
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index],
curses.A_NORMAL)
i = i + 1
index = index - 1
@@ -83,16 +85,18 @@ class CursesStdIO:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
self.rows - 2, 0,
text + ' ' * (self.cols - len(text)),
curses.A_STANDOUT)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
0, 0,
text + ' ' * (self.cols - len(text)),
curses.A_STANDOUT)
def doRead(self):
"""Input is ready!"""
""" Input is ready! """
curses.noecho()
c = self.stdscr.getch() # read a character
@@ -101,7 +105,7 @@ class CursesStdIO:
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.searchText = ''
self.print_line(">> %s" % text)
@@ -118,13 +122,11 @@ class CursesStdIO:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.stdscr.addstr(self.rows - 1, 0,
self.searchText + (' ' * (
self.cols - len(self.searchText) - 2)))
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.paintStatus(self.statusText + ' %d' % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
@@ -132,7 +134,7 @@ class CursesStdIO:
return "CursesStdIO"
def close(self):
"""clean up"""
""" clean up """
curses.nocbreak()
self.stdscr.keypad(0)
@@ -140,7 +142,8 @@ class CursesStdIO:
curses.endwin()
class Callback:
class Callback(object):
def __init__(self, stdio):
self.stdio = stdio
@@ -149,7 +152,7 @@ class Callback:
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
@@ -161,5 +164,5 @@ def main(stdscr):
screen.close()
if __name__ == "__main__":
if __name__ == '__main__':
curses.wrapper(main)

View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -27,24 +28,29 @@ Currently assumes the local address is localhost:<port>
"""
from synapse.federation import (
ReplicationHandler
)
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
from synapse.app.homeserver import SynapseHomeServer
#from synapse.util.logutils import log_function
from twisted.internet import reactor, defer
from twisted.python import log
import argparse
import curses.wrapper
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
import curses.wrapper
logger = logging.getLogger("example")
@@ -54,8 +60,8 @@ def excpetion_errback(failure):
logging.exception(failure)
class InputOutput:
"""This is responsible for basic I/O so that a user can interact with
class InputOutput(object):
""" This is responsible for basic I/O so that a user can interact with
the example app.
"""
@@ -67,40 +73,41 @@ class InputOutput:
self.server = server
def on_line(self, line):
"""This is where we process commands."""
""" This is where we process commands.
"""
try:
m = re.match(r"^join (\S+)$", line)
m = re.match("^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
room_name, = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
# self.print_line("OK.")
#self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
m = re.match("^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
self.print_line("%s invited to %s" % (invitee, room_name))
self.server.invite_to_room(room_name, self.user, invitee)
# self.print_line("OK.")
#self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
m = re.match("^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
self.print_line("%s send to %s" % (self.user, room_name))
self.server.send_message(room_name, self.user, body)
# self.print_line("OK.")
#self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
m = re.match("^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
room_name, = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return
@@ -118,6 +125,7 @@ class InputOutput:
class IOLoggerHandler(logging.Handler):
def __init__(self, io):
logging.Handler.__init__(self)
self.io = io
@@ -130,11 +138,10 @@ class IOLoggerHandler(logging.Handler):
self.io.print_log(msg)
class Room:
"""Used to store (in memory) the current membership state of a room, and
class Room(object):
""" Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
def __init__(self, room_name):
self.room_name = room_name
self.invited = set()
@@ -146,7 +153,8 @@ class Room:
self.have_got_metadata = False
def add_participant(self, participant):
"""Someone has joined the room"""
""" Someone has joined the room
"""
self.participants.add(participant)
self.invited.discard(participant)
@@ -157,16 +165,16 @@ class Room:
self.oldest_server = server
def add_invited(self, invitee):
"""Someone has been invited to the room"""
""" Someone has been invited to the room
"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
"""A very basic home server implentation that allows people to join a
""" A very basic home server implentation that allows people to join a
room and then invite other people.
"""
def __init__(self, server_name, replication_layer, output):
self.server_name = server_name
self.replication_layer = replication_layer
@@ -177,7 +185,8 @@ class HomeServer(ReplicationHandler):
self.output = output
def on_receive_pdu(self, pdu):
"""We just received a PDU"""
""" We just received a PDU
"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
@@ -188,30 +197,46 @@ class HomeServer(ReplicationHandler):
elif pdu.content["membership"] == "invite":
self._on_invite(pdu.origin, pdu.context, pdu.state_key)
else:
self.output.print_line(
"#%s (unrec) %s = %s"
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
self.output.print_line("#%s (unrec) %s = %s" %
(pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
#def on_state_change(self, pdu):
##self.output.print_line("#%s (state) %s *** %s" %
##(pdu.context, pdu.state_key, pdu.pdu_type)
##)
#if "joinee" in pdu.content:
#self._on_join(pdu.context, pdu.content["joinee"])
#elif "invitee" in pdu.content:
#self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])
def _on_message(self, pdu):
"""We received a message"""
self.output.print_line(
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
)
""" We received a message
"""
self.output.print_line("#%s %s %s" %
(pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
"""Someone has joined a room, either a remote user or a local user"""
""" Someone has joined a room, either a remote user or a local user
"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
self.output.print_line("#%s %s %s" %
(context, joinee, "*** JOINED")
)
def _on_invite(self, origin, context, invitee):
"""Someone has been invited"""
""" Someone has been invited
"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED"))
self.output.print_line("#%s %s %s" %
(context, invitee, "*** INVITED")
)
if not room.have_got_metadata and origin is not self.server_name:
logger.debug("Get room state")
@@ -220,7 +245,8 @@ class HomeServer(ReplicationHandler):
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
"""Send a message to a room!"""
""" Send a message to a room!
"""
destinations = yield self.get_servers_for_context(room_name)
try:
@@ -238,28 +264,30 @@ class HomeServer(ReplicationHandler):
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
"""Join a room!"""
""" Join a room!
"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
try:
pdu = Pdu.create_new(
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
yield self.replication_layer.send_pdu(pdu)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
"""Invite someone to a room!"""
""" Invite someone to a room!
"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
@@ -290,33 +318,32 @@ class HomeServer(ReplicationHandler):
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return list(self.joined_rooms.setdefault(room_name).servers)
return [i for i in self.joined_rooms.setdefault(room_name,).servers]
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
def get_servers_for_context(self, context):
return defer.succeed(
self.joined_rooms.setdefault(context, Room(context)).servers
)
def main(stdscr):
parser = argparse.ArgumentParser()
parser.add_argument("user", type=str)
parser.add_argument("-v", "--verbose", action="count")
args = parser.parse_args()
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
root_logger = logging.getLogger()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
if not os.path.exists("logs"):
os.makedirs("logs")
fh = logging.FileHandler("logs/%s" % user)
@@ -330,7 +357,7 @@ def main(stdscr):
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
@@ -344,16 +371,16 @@ def main(stdscr):
input_output.set_home_server(hs)
# Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
try:
port = int(server_name.split(":")[1])
except Exception:
port = 12345
app_hs.get_http_server().start_listening(port)

View File

@@ -1,6 +0,0 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Setting them up is out of scope for this readme; useful documentation on using Grafana with Prometheus lives at http://docs.grafana.org/features/datasources/prometheus/
1. Configure Prometheus to scrape your Synapse: https://matrix-org.github.io/synapse/latest/metrics-howto.html
2. Import the dashboard into Grafana: download `synapse.json`, import it into Grafana, and select the correct Prometheus datasource. See http://docs.grafana.org/reference/export_import/
3. Set up the required recording rules from [contrib/prometheus](../prometheus), as sketched below.
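
For step 3, the Prometheus side is just a `rule_files` entry in `prometheus.yml` next to the scrape job; a minimal sketch, assuming Prometheus 2.x and that the rules file from `contrib/prometheus` has been copied to `/etc/prometheus/` (the path and target address are placeholder assumptions, not taken from this repo):

```yaml
# prometheus.yml (sketch; path and target are placeholders)
rule_files:
  - "/etc/prometheus/synapse-v2.rules"

scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["my.server.here:port"]
```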

File diff suppressed because it is too large

View File

@@ -1,11 +1,3 @@
import argparse
import cgi
import datetime
import json
import pydot
import urllib2
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,6 +13,15 @@ import urllib2
# limitations under the License.
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
@@ -30,7 +31,7 @@ def make_graph(pdus, room, filename_prefix):
node_map = {}
origins = set()
colors = {"red", "green", "blue", "yellow", "purple"}
for pdu in pdus:
origins.add(pdu.get("origin"))
@@ -46,8 +47,8 @@ def make_graph(pdus, room, filename_prefix):
try:
c = colors.pop()
color_map[o] = c
except Exception:
print("Run out of colours!")
color_map[o] = "black"
graph = pydot.Dot(graph_name="Test")
@@ -56,9 +57,9 @@ def make_graph(pdus, room, filename_prefix):
name = make_name(pdu.get("pdu_id"), pdu.get("origin"))
pdu_map[name] = pdu
t = datetime.datetime.fromtimestamp(float(pdu["ts"]) / 1000).strftime(
"%Y-%m-%d %H:%M:%S,%f"
)
label = (
"<"
@@ -78,7 +79,11 @@ def make_graph(pdus, room, filename_prefix):
"depth": pdu.get("depth"),
}
node = pydot.Node(name=name, label=label, color=color_map[pdu.get("origin")])
node_map[name] = node
graph.add_node(node)
@@ -88,7 +93,7 @@ def make_graph(pdus, room, filename_prefix):
end_name = make_name(i, o)
if end_name not in node_map:
print("%s not in nodes" % end_name)
print "%s not in nodes" % end_name
continue
edge = pydot.Edge(node_map[start_name], node_map[end_name])
@@ -102,13 +107,14 @@ def make_graph(pdus, room, filename_prefix):
if prev_state_name in node_map:
state_edge = pydot.Edge(
node_map[start_name], node_map[prev_state_name], style="dotted"
)
graph.add_edge(state_edge)
graph.write("%s.dot" % filename_prefix, format="raw", prog="dot")
# graph.write_png("%s.png" % filename_prefix, prog='dot')
graph.write_svg("%s.svg" % filename_prefix, prog="dot")
def get_pdus(host, room):
@@ -124,14 +130,15 @@ def get_pdus(host, room):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(
"-p", "--prefix", dest="prefix", help="String to prefix output files with"
"-p", "--prefix", dest="prefix",
help="String to prefix output files with"
)
parser.add_argument("host")
parser.add_argument("room")
args = parser.parse_args()

View File

@@ -13,13 +13,12 @@
# limitations under the License.
import argparse
import cgi
import datetime
import json
import sqlite3
import pydot
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
@@ -37,7 +36,10 @@ def make_graph(db_name, room_id, file_prefix, limit):
args = [room_id]
if limit:
sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
args.append(limit)
@@ -54,8 +56,9 @@ def make_graph(db_name, room_id, file_prefix, limit):
for event in events:
c = conn.execute(
"SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
(event.event_id,),
"SELECT state_group FROM event_to_state_groups "
"WHERE event_id = ?",
(event.event_id,)
)
res = c.fetchone()
@@ -66,7 +69,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
content = json.dumps(unfreeze(event.get_dict()["content"]))
@@ -90,7 +93,10 @@ def make_graph(db_name, room_id, file_prefix, limit):
"state_group": state_group,
}
node = pydot.Node(name=event.event_id, label=label)
node_map[event.event_id] = node
graph.add_node(node)
@@ -99,8 +105,11 @@ def make_graph(db_name, room_id, file_prefix, limit):
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -112,33 +121,36 @@ def make_graph(db_name, room_id, file_prefix, limit):
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
for event_id in event_ids:
cluster.add_node(node_map[event_id])
graph.add_subgraph(cluster)
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
graph.write_svg("%s.svg" % file_prefix, prog="dot")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output",
default="graph_output"
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("db")
parser.add_argument("room")
args = parser.parse_args()

View File

@@ -1,13 +1,3 @@
import argparse
import cgi
import datetime
import pydot
import simplejson as json
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,23 +13,33 @@ from synapse.util.frozenutils import unfreeze
# limitations under the License.
def make_graph(file_name, room_id, file_prefix, limit):
print("Reading lines")
print "Reading lines"
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
print "Read lines"
events = [FrozenEvent(json.loads(line)) for line in lines]
print("Loaded events.")
print "Loaded events."
events.sort(key=lambda e: e.depth)
print("Sorted events")
print "Sorted events"
if limit:
events = events[-int(limit) :]
node_map = {}
@@ -48,32 +48,31 @@ def make_graph(file_name, room_id, file_prefix, limit):
for event in events:
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
content = content.replace("\n", "<br/>\n")
print(content)
content = []
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, str):
pass
else:
value = json.dumps(value)
content.append(
"<b>%s</b>: %s,"
% (
cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
"<b>%s</b>: %s," % (
cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
)
)
content = "<br/>\n".join(content)
print(content)
label = (
"<"
@@ -93,19 +92,25 @@ def make_graph(file_name, room_id, file_prefix, limit):
"depth": event.depth,
}
node = pydot.Node(name=event.event_id, label=label)
node_map[event.event_id] = node
graph.add_node(node)
print("Created Nodes")
print "Created Nodes"
for event in events:
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -113,33 +118,33 @@ def make_graph(file_name, room_id, file_prefix, limit):
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
print("Created edges")
print "Created edges"
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
print("Created Dot")
print "Created Dot"
graph.write_svg("%s.svg" % file_prefix, prog="dot")
print("Created svg")
graph.write_svg("%s.svg" % file_prefix, prog='dot')
print "Created svg"
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by reading "
"from a file with line deliminated events. \n"
"Requires pydot."
"from a file with line deliminated events. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output",
default="graph_output"
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
parser.add_argument("room")
args = parser.parse_args()

View File

@@ -8,35 +8,35 @@ we set the remote SDP at which point the stream ends. Our video never gets to
the bridge.
Requires:
npm install jquery jsdom
"""
import json
import subprocess
import time
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
ACCESS_TOKEN = ""
#ACCESS_TOKEN="" #
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"
HTTPBIND = "https://meet.jit.si/http-bind"
# HTTPBIND = 'https://jitsi.vuc.me/http-bind'
# ROOMNAME = "matrix"
ROOMNAME = "pibble"
HOST = "guest.jit.si"
# HOST="jitsi.vuc.me"
HOST="guest.jit.si"
#HOST="jitsi.vuc.me"
TURNSERVER = "turn.guest.jit.si"
# TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN = "meet.jit.si"
# ROOMDOMAIN="conference.jitsi.vuc.me"
TURNSERVER="turn.guest.jit.si"
#TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN="meet.jit.si"
#ROOMDOMAIN="conference.jitsi.vuc.me"
class TrivialMatrixClient:
def __init__(self, access_token):
@@ -45,50 +45,38 @@ class TrivialMatrixClient:
def getEvent(self):
while True:
url = (
MATRIXBASE
+ "events?access_token="
+ self.access_token
+ "&timeout=60000"
)
if self.token:
url += "&from=" + self.token
url += "&from="+self.token
req = grequests.get(url)
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("incoming from matrix", obj)
if "end" not in obj:
print "incoming from matrix",obj
if 'end' not in obj:
continue
self.token = obj["end"]
if len(obj["chunk"]):
return obj["chunk"][0]
def joinRoom(self, roomId):
url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token
print(url)
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data="{}")
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
print "response: ",obj
def sendEvent(self, roomId, evType, event):
url = (
MATRIXBASE
+ "rooms/"
+ roomId
+ "/send/"
+ evType
+ "?access_token="
+ self.access_token
)
print(url)
print(json.dumps(event))
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data=json.dumps(event))
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
print "response: ",obj
xmppClients = {}
@@ -97,40 +85,39 @@ xmppClients = {}
def matrixLoop():
while True:
ev = matrixCli.getEvent()
print(ev)
if ev["type"] == "m.room.member":
print("membership event")
if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME:
roomId = ev["room_id"]
print("joining room %s" % (roomId))
matrixCli.joinRoom(roomId)
elif ev["type"] == "m.room.message":
if ev["room_id"] in xmppClients:
print("already have a bridge for that user, ignoring")
continue
print("got message, connecting")
xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"])
gevent.spawn(xmppClients[ev["room_id"]].xmppLoop)
elif ev["type"] == "m.call.invite":
print("Incoming call")
# sdp = ev['content']['offer']['sdp']
# print "sdp: %s" % (sdp)
# xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
# gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev["type"] == "m.call.answer":
print("Call answered")
sdp = ev["content"]["answer"]["sdp"]
if ev["room_id"] not in xmppClients:
print("We didn't have a call for that room")
print "got message, connecting"
xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev['type'] == 'm.call.invite':
print "Incoming call"
#sdp = ev['content']['offer']['sdp']
#print "sdp: %s" % (sdp)
#xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
#gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev['type'] == 'm.call.answer':
print "Call answered"
sdp = ev['content']['answer']['sdp']
if ev['room_id'] not in xmppClients:
print "We didn't have a call for that room"
continue
# should probably check call ID too
xmppCli = xmppClients[ev["room_id"]]
xmppCli.sendAnswer(sdp)
elif ev["type"] == "m.call.hangup":
if ev["room_id"] in xmppClients:
xmppClients[ev["room_id"]].stop()
del xmppClients[ev["room_id"]]
class TrivialXmppClient:
def __init__(self, matrixRoom, userId):
@@ -144,155 +131,130 @@ class TrivialXmppClient:
def nextRid(self):
self.rid += 1
return "%d" % (self.rid)
def sendIq(self, xml):
fullXml = (
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>"
% (self.nextRid(), self.sid, xml)
)
# print "\t>>>%s" % (fullXml)
fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
#print "\t>>>%s" % (fullXml)
return self.xmppPoke(fullXml)
def xmppPoke(self, xml):
headers = {"Content-Type": "application/xml"}
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
resps = grequests.map([req])
obj = BeautifulSoup(resps[0].content)
return obj
def sendAnswer(self, answer):
print("sdp from matrix client", answer)
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--sdp"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
print "sdp from matrix client",answer
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
jingle, out_err = p.communicate(answer)
jingle = jingle % {
"tojid": self.callfrom,
"action": "session-accept",
"initiator": self.callfrom,
"responder": self.jid,
"sid": self.callsid,
}
print("answer jingle from sdp", jingle)
print "answer jingle from sdp",jingle
res = self.sendIq(jingle)
print("reply from answer: ", res)
print "reply from answer: ",res
self.ssrcs = {}
jingleSoup = BeautifulSoup(jingle)
for cont in jingleSoup.iq.jingle.findAll("content"):
if cont.description:
self.ssrcs[cont["name"]] = cont.description["ssrc"]
print("my ssrcs:", self.ssrcs)
gevent.joinall([gevent.spawn(self.advertiseSsrcs)])
def advertiseSsrcs(self):
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
print "reply from ssrc announce: ",res
time.sleep(10)
def xmppLoop(self):
self.matrixCallId = time.time()
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), HOST)
)
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))
print(res)
self.sid = res.body["sid"]
print("sid %s" % (self.sid))
res = self.sendIq(
"<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>"
)
res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), self.sid, HOST)
)
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))
res = self.sendIq(
"<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>"
)
print(res)
res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
print res
self.jid = res.body.iq.bind.jid.string
print("jid: %s" % (self.jid))
self.shortJid = self.jid.split("-")[0]
print "jid: %s" % (self.jid)
self.shortJid = self.jid.split('-')[0]
res = self.sendIq(
"<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>"
)
res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")
# randomthing = res.body.iq['to']
# whatsitpart = randomthing.split('-')[0]
# print "other random bind thing: %s" % (randomthing)
#print "other random bind thing: %s" % (randomthing)
# advertise presence to the jitsi room, with our nick
res = self.sendIq(
"<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>"
% (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId)
)
self.muc = {"users": []}
for p in res.body.findAll("presence"):
res = self.sendIq("<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>" % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId))
self.muc = {'users': []}
for p in res.body.findAll('presence'):
u = {}
u["shortJid"] = p["from"].split("/")[1]
if p.c and p.c.nick:
u["nick"] = p.c.nick.string
self.muc["users"].append(u)
print("muc: ", self.muc)
# wait for stuff
while True:
print("waiting...")
print "waiting..."
res = self.sendIq("")
print("got from stream: ", res)
print "got from stream: ",res
if res.body.iq:
jingles = res.body.iq.findAll("jingle")
if len(jingles):
self.callfrom = res.body.iq["from"]
self.handleInvite(jingles[0])
elif "type" in res.body and res.body["type"] == "terminate":
self.running = False
del xmppClients[self.matrixRoom]
return
def handleInvite(self, jingle):
self.initiator = jingle["initiator"]
self.callsid = jingle["sid"]
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--jingle"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
print("raw jingle invite", str(jingle))
sdp, out_err = p.communicate(str(jingle))
print("transformed remote offer sdp", sdp)
print "transformed remote offer sdp",sdp
inviteEvent = {
"offer": {"type": "offer", "sdp": sdp},
"call_id": self.matrixCallId,
"version": 0,
"lifetime": 30000,
}
matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent)
matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
gevent.joinall([gevent.spawn(matrixLoop)])

View File

@@ -1,48 +0,0 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against Synapse.

To use it, first install Prometheus by following the instructions at
http://prometheus.io/
### for Prometheus v1
Add a new job to the main prometheus.conf file:
```yaml
job: {
name: "synapse"
target_group: {
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
}
}
```
### for Prometheus v2
Add a new job to the main prometheus.yml file:
```yaml
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
# when endpoint uses https:
scheme: "https"
static_configs:
- targets: ["my.server.here:port"]
```
An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
To use `synapse.rules` add
```yaml
rule_files:
- "/PATH/TO/synapse-v2.rules"
```
Metrics are disabled by default when running Synapse; they must be enabled
with the 'enable-metrics' option, either in the Synapse config file or as a
command-line option.
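
As a concrete illustration, a minimal sketch of the Synapse side, assuming a recent Synapse where the option is spelled `enable_metrics` in `homeserver.yaml` and metrics are served from a dedicated listener (the port and bind address are arbitrary examples):

```yaml
# homeserver.yaml (sketch; assumes a recent Synapse, port is an example)
enable_metrics: true

listeners:
  - port: 9092
    type: metrics
    bind_addresses: ['127.0.0.1']
```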

View File

@@ -1,378 +0,0 @@
{{ template "head" . }}
{{ template "prom_content_head" . }}
<h1>System Resources</h1>
<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resource_utime"),
expr: "rate(process_cpu_seconds_total[2m]) * 100",
name: "[[job]]-[[index]]",
min: 0,
max: 100,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "%",
yTitle: "CPU Usage"
})
</script>
<h3>Memory</h3>
<div id="process_resident_memory_bytes"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resident_memory_bytes"),
expr: "process_resident_memory_bytes",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "bytes",
yTitle: "Usage"
})
</script>
<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_fds"),
expr: "process_open_fds",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "",
yTitle: "Descriptors"
})
</script>
<h1>Reactor</h1>
<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_total_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
name: "[[job]]-[[index]]",
max: 1,
min: 0,
renderer: "area",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_average_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
name: "[[job]]-[[index]]",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
})
</script>
<h1>Storage</h1>
<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_query_time"),
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
name: "[[verb]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "queries/s",
yTitle: "Queries"
})
</script>
<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transaction_time"),
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
name: "[[job]]-[[index]] [[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "txn/s",
yTitle: "Transactions"
})
</script>
<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_sec"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transactions_time_sec"),
expr: "rate(synapse_storage_transaction_time_sum[2m])",
name: "[[job]]-[[index]] [[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Average time waiting for database connection</h3>
<div id="synapse_storage_avg_waiting_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_avg_waiting_time"),
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
name: "[[job]]-[[index]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
})
</script>
<h3>Cache request rate</h3>
<div id="synapse_cache_request_rate"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_request_rate"),
expr: "rate(synapse_util_caches_cache:total[2m])",
name: "[[job]]-[[index]] [[name]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "rps",
yTitle: "Cache request rate"
})
</script>
<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_size"),
expr: "synapse_util_caches_cache:size",
name: "[[job]]-[[index]] [[name]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Items"
})
</script>
<h1>Requests</h1>
<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h4>&nbsp;(without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses"),
expr: "rate(synapse_http_server_responses[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_responses_err"),
expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
name: "[[method]] / [[code]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "CPU Usage"
})
</script>
<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
yTitle: "DB Usage"
})
</script>
<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
name: "[[job]]-[[index]] [[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
yTitle: "Response time"
})
</script>
<h1>Federation</h1>
<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_client_sent"),
expr: "rate(synapse_federation_client_sent[2m])",
name: "[[job]]-[[index]] [[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_server_received"),
expr: "rate(synapse_federation_server_received[2m])",
name: "[[job]]-[[index]] [[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
yTitle: "Requests"
})
</script>
<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_transaction_queue_pending"),
expr: "synapse_federation_transaction_queue_pending",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Units"
})
</script>
<h1>Clients</h1>
<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_listeners"),
expr: "synapse_notifier_listeners",
name: "[[job]]-[[index]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
yTitle: "Listeners"
})
</script>
<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_notified_events"),
expr: "rate(synapse_notifier_notified_events[2m])",
name: "[[job]]-[[index]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "events/s",
yTitle: "Event rate"
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}

View File

@@ -1,21 +0,0 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0

Some files were not shown because too many files have changed in this diff