Compare commits


1 Commit

Author SHA1 Message Date
Andrew Morgan 9690e9dbac WIP 2019-01-31 18:31:17 +00:00
1733 changed files with 97126 additions and 308115 deletions

View File

@@ -1,91 +0,0 @@
{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}}
{{- /*
This template contains the format for an individual package. GitHub Actions does not currently support nested groups, so
we create a stylized header for each package instead.
This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl
which is under the Unlicense licence.
*/ -}}
{{- $settings := .Settings -}}
{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}}
{{- if eq .Result "PASS" -}}
{{ "\033" }}[0;32m
{{- else if eq .Result "SKIP" -}}
{{ "\033" }}[0;33m
{{- else -}}
{{ "\033" }}[0;31m
{{- end -}}
📦 {{ .Name }}{{- "\033" }}[0m
{{- with .Coverage -}}
{{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m
{{- end -}}
{{- "\n" -}}
{{- with .Reason -}}
{{- " " -}}🛑 {{ . -}}{{- "\n" -}}
{{- end -}}
{{- with .Output -}}
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Passing tests are first */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- /* and failing tests are last */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
{{- end -}})
{{- "\033" -}}[0m
{{- "\n" -}}
{{- with .Output -}}
{{- formatTestOutput . $settings -}}
{{- "\n" -}}
{{- end -}}
::endgroup::{{- "\n" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- "\n" -}}
{{- end -}}
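
The `::group::`/`::endgroup::` lines this template emits are GitHub Actions workflow commands for collapsible log sections, and the `\033[0;32m`-style sequences are ANSI colour codes. A minimal Python sketch of the same mechanism (illustrative only, not part of this commit):

GREEN = "\033[0;32m"
GREY = "\033[0;37m"
RESET = "\033[0m"

def emit_group(title: str, body: str) -> None:
    # "::group::" / "::endgroup::" make GitHub Actions fold the enclosed lines.
    print(f"::group::{GREEN}✅ {title}{RESET}")
    print(f"{GREY}{body}{RESET}")
    print("::endgroup::")

emit_group("TestExample (PASS; 0.01s)", "test output goes here")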

View File

@@ -1,4 +0,0 @@
---
title: CI run against latest deps is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

View File

@@ -1,19 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the postgresql database that will be available in the
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
name: "psycopg2"
args:
user: postgres
host: localhost
password: postgres
database: synapse
# Suppress the key server warning.
trusted_key_servers: []

View File

@@ -1,132 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel; if so, we rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """
    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an abi3-compatible wheel."""
    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""
    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect a wheel file with only a single tag.
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse.
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macOS and need to use
        # `delocate` rather than `auditwheel`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )
    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )
    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
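
The renaming logic above can be sanity-checked directly with `packaging`; a small sketch (the wheel filename is made up):

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

# A hypothetical CPython wheel name of the kind cibuildwheel produces.
_, version, _, tags = parse_wheel_filename(
    "matrix_synapse-1.0.0-cp37-cp37m-manylinux_2_17_aarch64.whl"
)
tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
# Prints "matrix_synapse-1.0.0-cp37-abi3-manylinux_2_17_aarch64.whl": the renamed
# wheel installs on CPython 3.7 and newer, rather than on 3.7 only.
print(f"matrix_synapse-{version}-{abi3_tag}.whl")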

View File

@@ -1,135 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calculate the trial jobs to run based on whether we're in a PR or not.

import json
import os


def set_output(key: str, value: str):
    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
        print(f"{key}={value}", file=f)


IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# On PRs, each type of test is only run on Py3.7.

trial_sqlite_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "all",
    }
]

if not IS_PR:
    trial_sqlite_tests.extend(
        {
            "python-version": version,
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10")
    )

trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "10",
        "extras": "all",
    }
]

if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.10",
            "database": "postgres",
            "postgres-version": "14",
            "extras": "all",
        }
    )

trial_no_extra_tests = [
    {
        "python-version": "3.7",
        "database": "sqlite",
        "extras": "",
    }
]

print("::group::Calculated trial jobs")
print(
    json.dumps(
        trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
    )
)
print("::endgroup::")

test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)


# Next, calculate the various sytest jobs.
#
# On PRs, we only test against focal.

sytest_tests = [
    {
        "sytest-tag": "focal",
    },
    {
        "sytest-tag": "focal",
        "postgres": "postgres",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
            },
            {
                "sytest-tag": "buster",
                "postgres": "multi-postgres",
                "workers": "workers",
            },
        ]
    )

print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)

View File

@@ -1,25 +0,0 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.
set -e
mkdir -p complement
# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
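
The precedence of the fallbacks is easier to see spelled out; a rough Python equivalent of the loop (a sketch only, the script itself remains bash; `removeprefix` needs Python 3.9+):

import os

def candidate_branches() -> list:
    # Same order as the loop above: PR head, PR base, pushed branch, then HEAD.
    raw = [
        os.environ.get("GITHUB_HEAD_REF", ""),
        os.environ.get("GITHUB_BASE_REF", ""),
        os.environ.get("GITHUB_REF", "").removeprefix("refs/heads/"),
        "HEAD",
    ]
    # Skip empty names and merge refs such as "refs/pull/123/merge".
    return [b for b in raw if b and not b.startswith("refs/pull/")]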

View File

@@ -1,21 +0,0 @@
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.
set -o pipefail
set -e
# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages
# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"
# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
set -ex
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
# TODO: in the future, we could use an implementation of
# https://github.com/python-poetry/poetry/issues/3527
# https://github.com/pypa/pip/issues/8085
# to select the lowest possible versions, rather than resorting to this sed script.
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.2.0
poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"

View File

@@ -1,36 +0,0 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock
block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Test for the export-data admin command against sqlite and postgres
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
exit 1
fi
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."
exit 1
fi

View File

@@ -1,67 +0,0 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe -o pipefail
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
# Now do the same again, on an empty database.
echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff

View File

@@ -1,16 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"
signing_key_path: ".ci/test.signing.key"
report_stats: false
database:
name: "sqlite3"
args:
database: ".ci/test_db.db"
# Suppress the key server warning.
trusted_key_servers: []

Binary file not shown.

View File

@@ -1,4 +0,0 @@
---
title: CI run against Twisted trunk is failing
---
See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}

View File

@@ -1,2 +0,0 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.

168 .circleci/config.yml Normal file
View File

@@ -0,0 +1,168 @@
version: 2
jobs:
  dockerhubuploadrelease:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py3
  sytestpy2:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /v[0-9].[0-9]+.[0-9]+.*/
            branches:
              ignore: /.*/
      - dockerhubuploadlatest:
          filters:
            branches:
              only: master

34 .circleci/merge_base_branch.sh Executable file
View File

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -e
# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is! Assuming merge target is develop."

    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
    # can probably assume it's based on it and will be merged into it.
    GITBASE="develop"
else
    # Get the reference, using the GitHub API
    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi
# Show what we are before
git --no-pager show -s
# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE
# Show what we are after.
git --no-pager show -s
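
The only non-obvious part is `${CIRCLE_PULL_REQUEST##*/}`, which strips everything up to the final `/` and so turns the PR URL into its number. In Python terms (illustrative URL):

url = "https://github.com/matrix-org/synapse/pull/1234"  # illustrative
pr_number = url.rsplit("/", 1)[-1]  # "1234", same as ${CIRCLE_PULL_REQUEST##*/}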

View File

@@ -1,4 +1,5 @@
comment: off
comment:
layout: "diff"
coverage:
status:

View File

@@ -1,8 +1,7 @@
[run]
branch = True
parallel = True
include=$TOP/synapse/*
data_file = $TOP/.coverage
include = synapse/*
[report]
precision = 2

View File

@@ -1,18 +1,9 @@
# ignore everything by default
*
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py
rust/target
synapse/*.so
**/__pycache__
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.git/*
.tox/*
debian/matrix-synapse/
debian/matrix-synapse-*/

View File

@@ -7,4 +7,3 @@ root = true
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88

11 .flake8
View File

@@ -1,11 +0,0 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501

View File

@@ -1,24 +0,0 @@
# Commits in this file will be removed from GitHub blame results.
#
# To use this file locally, use:
# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" <files>
#
# or configure the `blame.ignoreRevsFile` option in your git config.
#
# If ignoring a pull request that was not squash merged, only the merge
# commit needs to be put here. Child commits will be resolved from it.
# Run black (#3679).
8b3d9b6b199abb87246f982d5db356f1966db925
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7

2 .github/CODEOWNERS vendored
View File

@@ -1,2 +0,0 @@
# Automatically request reviews from the synapse-core team when a pull request comes in.
* @matrix-org/synapse-core

4 .github/FUNDING.yml vendored
View File

@@ -1,4 +0,0 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg

View File

@@ -1,5 +0,0 @@
**If you are looking for support** please ask in **#synapse:matrix.org**
(using a matrix.org account if necessary). We do not use GitHub issues for
support.
**If you want to report a security issue** please see https://matrix.org/security-disclosure-policy/

66 .github/ISSUE_TEMPLATE/BUG_REPORT.md vendored Normal file
View File

@@ -0,0 +1,66 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
Text between <!-- and --> marks will be invisible in the report.
-->
### Description
<!-- Describe here the problem that you are experiencing -->
### Steps to reproduce
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->
### Version information
<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:
If not matrix.org:
<!--
What version of Synapse is running?
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-->
- **Version**:
- **Install method**:
<!-- examples: package manager/git clone/pip -->
- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->

View File

@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help us improve
body:
  - type: markdown
    attributes:
      value: |
        **THIS IS NOT A SUPPORT CHANNEL!**
        **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).

        If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/

        This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.

        You can also preview your report before submitting it.
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Describe the problem that you are experiencing
    validations:
      required: true
  - type: textarea
    id: reproduction_steps
    attributes:
      label: Steps to reproduce
      description: |
        Describe the series of steps that leads you to the problem.

        Describe how what happens differs from what you expected.
      placeholder: Tell us what you see!
      value: |
        - list the steps
        - that reproduce the bug
        - using hyphens as bullet points
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        ---

        **IMPORTANT**: please answer the following questions, to help us narrow down the problem.
  - type: input
    id: homeserver
    attributes:
      label: Homeserver
      description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: Synapse Version
      description: |
        What version of Synapse is this homeserver running?

        You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version

        or with this command:

        ```
        $ curl http://localhost:8008/_synapse/admin/v1/server_version
        ```

        (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
    validations:
      required: true
  - type: dropdown
    id: install_method
    attributes:
      label: Installation Method
      options:
        - Docker (matrixdotorg/synapse)
        - Debian packages from packages.matrix.org
        - pip (from PyPI)
        - Other (please mention below)
  - type: textarea
    id: platform
    attributes:
      label: Platform
      description: |
        Tell us about the environment in which your homeserver is operating...
        e.g. distro, hardware, if it's running in a vm/container, etc.
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
        This will be automatically formatted into code, so there is no need for backticks.

        Please be careful to remove any personal or private data.

        **Bug reports are usually very difficult to diagnose without logging.**
      render: shell
    validations:
      required: true
  - type: textarea
    id: anything_else
    attributes:
      label: Anything else that would be useful to know?

View File

@@ -4,7 +4,6 @@ about: I need support for Synapse
---
Please don't file github issues asking for support.
# Please ask for support in [**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org)
Instead, please join [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org)
(from a matrix.org account if necessary), and ask there.
## Don't file an issue as a support request.

View File

@@ -1,14 +1,7 @@
### Pull Request Checklist
<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->
<!-- Please read CONTRIBUTING.rst before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
- Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)

6 .github/SUPPORT.md vendored
View File

@@ -1,3 +1,3 @@
[**#synapse:matrix.org**](https://matrix.to/#/#synapse:matrix.org) is the official support room for
Synapse, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html.
Please ask for support there, rather than filing github issues.
[**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org) is the official support room for Matrix, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html
It can also be accessed via the IRC bridge at irc://irc.freenode.net/matrix or on the web here: https://webchat.freenode.net/?channels=matrix

View File

@@ -1,22 +0,0 @@
version: 2
updates:
  # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/docker"
    schedule:
      interval: "weekly"

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "weekly"

View File

@@ -1,46 +0,0 @@
name: Write changelog for dependabot PR
on:
  pull_request:
    types:
      - opened
      - reopened  # For debugging!

permissions:
  # Needed to be able to push the commit. See
  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
  # for a similar example
  contents: write

jobs:
  add-changelog:
    runs-on: 'ubuntu-latest'
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.ref }}
      - name: Write, commit and push changelog
        run: |
          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
          git add changelog.d
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "GitHub Actions"
          git commit -m "Changelog"
          git push
        shell: bash
    # The `git push` above does not trigger CI on the dependabot PR.
    #
    # By default, workflows can't trigger other workflows when they're just using the
    # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
    # recursive workflow loops by accident, because that'll get very expensive very
    # quickly.) Instead, you have to manually call out to another workflow, or else
    # make your changes (i.e. the `git push` above) using a personal access token.
    # See
    # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
    #
    # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
    # See git commit history for previous attempts. If anyone desperately wants to try
    # again in the future, make a matrix-bot account and use its access token to git push.

  # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
  # are sufficiently locked down to dependabot only as above.

View File

@@ -1,62 +0,0 @@
# GitHub actions workflow which builds and publishes the docker images.

name: Build docker images

on:
  push:
    tags: ["v*"]
    branches: [ master, main, develop ]
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v2
        with:
          platforms: arm64

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v2

      - name: Inspect builder
        run: docker buildx inspect

      - name: Log in to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Calculate docker image tag
        id: set-tag
        uses: docker/metadata-action@master
        with:
          images: matrixdotorg/synapse
          flavor: |
            latest=false
          tags: |
            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
            type=pep440,pattern={{raw}}

      - name: Build and push all platforms
        uses: docker/build-push-action@v3
        with:
          push: true
          labels: "gitsha1=${{ github.sha }}"
          tags: "${{ steps.set-tag.outputs.tags }}"
          file: "docker/Dockerfile"
          platforms: linux/amd64,linux/arm64
          # arm64 builds OOM without the git fetch setting. c.f.
          # https://github.com/rust-lang/cargo/issues/10583
          build-args: |
            CARGO_NET_GIT_FETCH_WITH_CLI=true

View File

@@ -1,65 +0,0 @@
name: Deploy the documentation

on:
  push:
    branches:
      # For bleeding-edge documentation
      - develop
      # For documentation specific to a release
      - 'release-v*'
      # stable docs
      - master

  workflow_dispatch:

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      # Figure out the target directory.
      #
      # The target directory depends on the name of the branch
      #
      - name: Get the target directory name
        id: vars
        run: |
          # first strip the 'refs/heads/' prefix with some shell foo
          branch="${GITHUB_REF#refs/heads/}"

          case $branch in
              release-*)
                  # strip 'release-' from the name for release branches.
                  branch="${branch#release-}"
                  ;;
              master)
                  # deploy to "latest" for the master branch.
                  branch="latest"
                  ;;
          esac

          # finally, set the 'branch-version' var.
          echo "branch-version=$branch" >> "$GITHUB_OUTPUT"

      # Deploy to the target directory.
      - name: Deploy to gh pages
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
          destination_dir: ./${{ steps.vars.outputs.branch-version }}
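
The branch-to-directory mapping performed by the `case` statement above, spelled out as a Python sketch (not part of the workflow itself):

def docs_directory(ref: str) -> str:
    # Mirrors the case statement: "refs/heads/release-v1.2" -> "v1.2",
    # "refs/heads/master" -> "latest"; anything else deploys under its own name.
    branch = ref.removeprefix("refs/heads/")
    if branch.startswith("release-"):
        return branch.removeprefix("release-")
    if branch == "master":
        return "latest"
    return branch

assert docs_directory("refs/heads/develop") == "develop"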

View File

@@ -1,219 +0,0 @@
# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
# dependencies which match the broad requirements. Since most CI runs are against
# the locked poetry environment, run specifically against the latest dependencies to
# know if there's an upcoming breaking change.
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.

name: Latest dependencies

on:
  schedule:
    - cron: 0 7 * * *
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mypy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      # The dev dependencies aren't exposed in the wheel metadata (at least with current
      # poetry-core versions), so we install with poetry.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.2.0"
          extras: "all"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
      # Upgrade all runtime dependencies only. This is intended to mimic a fresh
      # `pip install matrix-synapse[all]` as closely as possible.
      - run: poetry update --no-dev
      - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
      - run: poetry run mypy

  trial:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - database: "sqlite"
          - database: "postgres"
            postgres-version: "14"
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
        if: ${{ matrix.postgres-version }}
        run: |
          docker run -d -p 5432:5432 \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - run: pip install .[all,test]
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done

      # We nuke the local copy, as we've installed synapse into the virtualenv
      # (rather than use an editable install, which we no longer support). If we
      # don't do this then python can't find the native lib.
      - run: rm -rf synapse/

      - run: python -m twisted.trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
        run: >-
          find _trial_temp -name '*.log'
          -exec echo "::group::{}" \;
          -exec cat {} \;
          -exec echo "::endgroup::" \;
          || true

  sytest:
    runs-on: ubuntu-latest
    container:
      image: matrixdotorg/sytest-synapse:testing
      volumes:
        - ${{ github.workspace }}:/src
    strategy:
      fail-fast: false
      matrix:
        include:
          - sytest-tag: focal

          - sytest-tag: focal
            postgres: postgres
            workers: workers
            redis: redis
    env:
      POSTGRES: ${{ matrix.postgres && 1 }}
      WORKERS: ${{ matrix.workers && 1 }}
      REDIS: ${{ matrix.redis && 1 }}
      BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - name: Ensure sytest runs `pip install`
        # Delete the lockfile so sytest will `pip install` rather than `poetry install`
        run: rm /src/poetry.lock
        working-directory: /src
      - name: Prepare test blacklist
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
      - name: Upload SyTest logs
        uses: actions/upload-artifact@v3
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  complement:
    if: "${{ !failure() && !cancelled() }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - arrangement: monolith
            database: SQLite

          - arrangement: monolith
            database: Postgres

          - arrangement: workers
            database: Postgres

    steps:
      - name: Run actions/checkout@v3 for synapse
        uses: actions/checkout@v3
        with:
          path: synapse

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      - run: |
          set -o pipefail
          TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
        shell: bash
        name: Run Complement Tests

  # Open an issue if the build fails, so we know about it.
  # Only do this if we're not experimenting with this action in a PR.
  open-issue:
    if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
    needs:
      # TODO: should mypy be included here? It feels more brittle than the others.
      - mypy
      - trial
      - sytest
      - complement

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          update_existing: true
          filename: .ci/latest_deps_build_failed_issue_template.md

View File

@@ -1,209 +0,0 @@
# GitHub actions workflow which builds the release artifacts.
name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
get-distros:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
echo "distros=$dists" >> "$GITHUB_OUTPUT"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
# now build the packages with a matrix build.
build-debs:
needs: get-distros
name: "Build .deb packages"
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
# for the cache magic here
run: |
./src/scripts-dev/build_debian_packages.py \
--docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
--docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
--docker-build-arg=--progress=plain \
--docker-build-arg=--load \
"${{ matrix.distro }}"
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v3
with:
name: debs
path: debs/*
build-wheels:
name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-10.15]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-10.15"
# Don't build aarch64 wheels on mac.
- os: "macos-10.15"
arch: aarch64
# Don't build aarch64 wheels on PR CI.
- is_pr: true
arch: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Build aarch64 wheels
if: matrix.arch == 'aarch64'
run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
name: Wheel
path: ./wheelhouse/*.whl
build-sdist:
name: Build sdist
runs-on: ubuntu-latest
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- run: pip install build
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
name: "Attach assets to release"
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}

View File

@@ -1,509 +0,0 @@
name: Tests
on:
push:
branches: ["develop", "release-*"]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
steps:
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
with:
filters: |
rust:
- 'rust/**'
- 'Cargo.toml'
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy
lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- lint-clippy
- lint-rustfmt
runs-on: ubuntu-latest
steps:
- run: "true"
calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: calculate-test-jobs
runs-on: ubuntu-latest
strategy:
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best-effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
extras: "all test"
- run: poetry run trial -j2 tests
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best-effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
- uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best-effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
if: ${{ !failure() && !cancelled() }}
needs: calculate-test-jobs
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
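# SyTest treats these as set/unset flags: "value && 1" yields 1 when the
# matrix value is truthy and an empty string otherwise.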
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
export-data:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: [linting-done, portdb]
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
services:
postgres:
image: postgres
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
include:
- python-version: "3.7"
postgres-version: "10"
- python-version: "3.10"
postgres-version: "14"
services:
postgres:
image: postgres:${{ matrix.postgres-version }}
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
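# pipefail makes this step fail when complement.sh fails, even though its
# output is piped through gotestfmt for readable logs.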
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
- run: cargo test
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
- cargo-test
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
with:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non-PR builds
# Cargo test is skipped if there are no changes to Rust code
skippable: |
lint-newsfile
cargo-test

View File

@@ -1,28 +0,0 @@
name: Move new issues into the issue triage board
on:
issues:
types: [ opened ]
jobs:
add_new_issues:
name: Add new issues to the triage board
runs-on: ubuntu-latest
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation add_to_project($projectid:ID!,$contentid:ID!) {
addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}

View File

@@ -1,44 +0,0 @@
name: Move labelled issues to correct projects
on:
issues:
types: [ labeled ]
jobs:
move_needs_info:
name: Move X-Needs-Info on the triage board
runs-on: ubuntu-latest
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation($projectid: ID!, $contentid: ID!, $fieldid: ID!, $optionid: String!) {
updateProjectV2ItemFieldValue(
input: {
projectId: $projectid
itemId: $contentid
fieldId: $fieldid
value: {
singleSelectOptionId: "Todo"
}
}
) {
projectV2Item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
fieldid: ${{ env.FIELD_ID }}
optionid: ${{ env.OPTION_ID }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
OPTION_ID: "ba22e43c"

View File

@@ -1,186 +0,0 @@
name: Twisted Trunk
on:
schedule:
- cron: 0 8 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all"
- run: |
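# Swap the pinned Twisted release for current upstream trunk before
# type-checking, so regressions against Twisted HEAD surface here.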
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
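# Twisted trunk may add type hints that make existing "type: ignore"
# comments unused, which would otherwise fail mypy.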
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
extras: "all test"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- run: poetry run trial --jobs 2 tests
- name: Dump logs
# Logs are most useful when the command fails, so always include them.
if: ${{ always() }}
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best-effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true
sytest:
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:buster
volumes:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
# We symlink it before running poetry so that poetry actually
# ends up installing to `/venv`.
run: |
ln -s -T /venv /src/.venv
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
env:
# Use offline mode to avoid reinstalling the pinned version of
# twisted.
OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite
- arrangement: monolith
database: Postgres
- arrangement: workers
database: Postgres
steps:
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.1.14
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
needs:
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/twisted_trunk_build_failed_issue_template.md

108
.gitignore vendored
View File

@@ -1,70 +1,64 @@
# filename patterns
*~
*.pyc
.*.swp
.#*
*.deb
*.egg
*.egg-info
*~
*.lock
*.py[cod]
*.snap
*.tac
.DS_Store
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
pip-wheel-metadata/
# We do want the poetry and cargo lockfile.
!poetry.lock
!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
/homeserver*.yaml
/logs
/media_store/
/uploads
# For direnv users
/.envrc
*.signing.key
*.tls.crt
*.tls.dh
*.tls.key
# IDEs
/.idea/
/.ropeproject/
/.vscode/
.coverage*
coverage.*
!.coveragerc
htmlcov
# build products
!/.coveragerc
/.coverage*
/.mypy_cache/
/.tox
/.tox-pg-container
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/
demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc
# docs
book/
uploads
cache
# complement
/complement-*
/master.tar.gz
.idea/
media_store/
# rust
/target/
/synapse/*.so
*.tac
# Poetry will create a setup.py, which we don't want to include.
/setup.py
build/
venv/
venv*/
*venv/
localhost-800*/
static/client/register/register_config.js
.tox
env/
*.config
.vscode/
.ropeproject/
*.deb
/debs

View File

@@ -1 +0,0 @@
group_imports = "StdExternalCrate"

83
.travis.yml Normal file
View File

@@ -0,0 +1,83 @@
sudo: false
language: python
cache:
directories:
# we only bother to cache the wheels; parts of the http cache get
# invalidated every build (because they get served with a max-age of 600
# seconds), which means that we end up re-uploading the whole cache for
# every build, which is time-consuming. In any case, it's not obvious that
# downloading the cache from S3 would be much faster than downloading the
# originals from pypi.
#
- $HOME/.cache/pip/wheels
addons:
postgresql: "9.4"
# don't clone the whole repo history, one commit will do
git:
depth: 1
# only build branches we care about (PRs are built separately)
branches:
only:
- master
- develop
- /^release-v/
# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
fast_finish: true
include:
- python: 2.7
env: TOX_ENV=packaging
- python: 3.6
env: TOX_ENV="pep8,check_isort"
- python: 2.7
env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
- python: 2.7
env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
- python: 2.7
env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- python: 3.5
env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
- python: 3.6
env: TOX_ENV=py36,codecov TRIAL_FLAGS="-j 2"
- python: 3.6
env: TOX_ENV=py36-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- # we only need to check for the newsfragment if it's a PR build
if: type = pull_request
python: 3.6
env: TOX_ENV=check-newsfragment
script:
- git remote set-branches --add origin develop
- git fetch origin develop
- tox -e $TOX_ENV
install:
- pip install tox
# if we don't have python3.6 in this environment, travis unhelpfully gives us
# a `python3.6` on our path which does nothing but spit out a warning. Tox
# tries to run it (even if we're not running a py36 env), so the build logs
# then have warnings which look like errors. To reduce the noise, remove the
# non-functional python3.6.
- ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
script:
- tox -e $TOX_ENV

View File

@@ -1,8 +1,34 @@
The following is an incomplete list of people outside the core team who have
contributed to Synapse. It is no longer maintained: more recent contributions
are listed in the `changelog <CHANGES.md>`_.
Erik Johnston <erik at matrix.org>
* HS core
* Federation API impl
----
Mark Haines <mark at matrix.org>
* HS core
* Crypto
* Content repository
* CS v2 API impl
Kegan Dougal <kegan at matrix.org>
* HS core
* CS v1 API impl
* AS API impl
Paul "LeoNerd" Evans <paul at matrix.org>
* HS core
* Presence
* Typing Notifications
* Performance metrics and caching layer
Dave Baker <dave at matrix.org>
* Push notifications
* Auth CS v2 impl
Matthew Hodgson <matthew at matrix.org>
* General doc & housekeeping
* Vertobot/vertobridge matrix<->verto PoC
Emmanuel Rohee <manu at matrix.org>
* Supporting iOS clients (testability and fallback registration)
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
@@ -36,16 +62,10 @@ Christoph Witzany <christoph at web.crofting.com>
* Add LDAP support for authentication
Pierre Jaury <pierre at jaury.eu>
* Docker packaging
* Docker packaging
Serban Constantin <serban.constantin at gmail dot com>
* Small bug fix
Joseph Weston <joseph at weston.cloud>
* Add admin API for querying HS version
Benjamin Saunders <ben.e.saunders at gmail dot com>
* Documentation improvements
Werner Sembach <werner.sembach at fau dot de>
* Automatically remove a group/community when it is empty
Jason Robinson <jasonr at matrix.org>
* Minor fixes

4655
CHANGES.md

File diff suppressed because it is too large

View File

@@ -1,3 +0,0 @@
# Welcome to Synapse
Please see the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html) in our rendered documentation.

169
CONTRIBUTING.rst Normal file
View File

@@ -0,0 +1,169 @@
Contributing code to Matrix
===========================
Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).
How to contribute
~~~~~~~~~~~~~~~~~
The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/).
**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**
We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
keep an eye on the pull request for feedback.
To run unit tests in a local development environment, you can use:
- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
SQLite-backed Synapse on Python 2.7.
- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.
Code style
~~~~~~~~~~
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Changelog
~~~~~~~~~
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
Attribution
~~~~~~~~~~~
Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and
we'll try to fix it :)
Sign off
~~~~~~~~
In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.
Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
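
For example (the name and email here are placeholders for your own)::

    git config user.name "Jane Matrix"
    git config user.email jane@example.org
    git commit -s -m "Validate Florb security levels on federation"

The resulting commit message will end with a matching ``Signed-off-by:`` line.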
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!

466
Cargo.lock generated
View File

@@ -1,466 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
dependencies = [
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "proc-macro2"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-log"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
"arc-swap",
"log",
"pyo3",
]
[[package]]
name = "pyo3-macros"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
"pyo3",
"serde",
]
[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
"hex",
"lazy_static",
"log",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
]
[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"

View File

@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).
[workspace]
members = ["rust"]

View File

@@ -1,7 +0,0 @@
# Installation Instructions
This document has moved to the
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
Please update your links.
The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).

44
MANIFEST.in Normal file
View File

@@ -0,0 +1,44 @@
include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py
recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.pem
recursive-include tests *.py
recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig
include pyproject.toml
recursive-include changelog.d *
prune .github
prune demo/etc
prune docker
prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
exclude jenkins*
recursive-exclude jenkins *.sh

1101
README.rst

File diff suppressed because it is too large

View File

@@ -1,7 +1,431 @@
Upgrading Synapse
=================
This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
Please update your links.
Before upgrading, check whether any special steps are required to upgrade from
what you currently have installed to the current version of Synapse. The extra
instructions that may be required are listed later in this document.
The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
1. If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
run:
.. code:: bash
source ~/.synapse/bin/activate
2. If synapse was installed using pip then upgrade to the latest version by
running:
.. code:: bash
pip install --upgrade matrix-synapse
# restart synapse
synctl restart
If synapse was installed using git then upgrade to the latest version by
running:
.. code:: bash
# Pull the latest version of the master branch.
git pull
# Update the versions of synapse's python dependencies.
python synapse/python_dependencies.py | xargs pip install --upgrade
# restart synapse
./synctl restart
To check whether your update was successful, you can check the Server header
returned by the Client-Server API:
.. code:: bash
# replace <host.name> with the hostname of your synapse homeserver.
# You may need to specify a port (eg, :8448) if your server is not
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to v0.99.0
====================
In preparation for Synapse v1.0, you must update your TLS certificates from
self-signed ones to verifiable ones signed by a trusted root CA.
If you do not already have a certificate for your domain, the easiest way to get
one is with Synapse's new ACME support, which will use the ACME protocol to
provision a certificate automatically. By default, certificates will be obtained
from the publicly trusted CA Let's Encrypt.
For a sample configuration, please inspect the new ACME section in the example
generated config by running the ``generate-config`` executable. For example::
~/synapse/env3/bin/generate-config
You will need to provide Let's Encrypt (or other ACME provider) access to your
Synapse ACME challenge responder on port 80, at the domain of your homeserver.
This requires that you either change the port of the ACME listener provided by
Synapse to a high port and reverse proxy to it, or use a tool like authbind to
allow Synapse to listen on port 80 without root access. (Do not run Synapse with
root permissions!)
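
As a sketch of the authbind route (assuming Synapse runs as a ``synapse``
user; adjust the user and startup command to your deployment)::

    sudo touch /etc/authbind/byport/80
    sudo chown synapse /etc/authbind/byport/80
    sudo chmod 500 /etc/authbind/byport/80
    authbind --deep synctl start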
You will need to back up or delete your self signed TLS certificate
(``example.com.tls.crt`` and ``example.com.tls.key``); Synapse's ACME
implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
already have a valid certificate for your homeserver's domain, that can be
placed in Synapse's config directory without the need for ACME.
Upgrading to v0.34.0
====================
1. This release is the first to fully support Python 3. Synapse will now run on
Python versions 3.5 or 3.6 (as well as 2.7). We recommend switching to
Python 3, as it has been shown to give performance improvements.
For users who have installed Synapse into a virtualenv, we recommend doing
this by creating a new virtualenv. For example::
virtualenv -p python3 ~/synapse/env3
source ~/synapse/env3/bin/activate
pip install matrix-synapse
You can then start synapse as normal, having activated the new virtualenv::
cd ~/synapse
source env3/bin/activate
synctl start
Users who have installed from distribution packages should see the relevant
package documentation. See below for notes on Debian packages.
* When upgrading to Python 3, you **must** make sure that your log files are
configured as UTF-8, by adding ``encoding: utf8`` to the
``RotatingFileHandler`` configuration (if you have one) in your
``<server>.log.config`` file. For example, if your ``log.config`` file
contains::
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: homeserver.log
maxBytes: 104857600
backupCount: 10
filters: [context]
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
Then you should update this to be::
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: homeserver.log
maxBytes: 104857600
backupCount: 10
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
There is no need to revert this change if downgrading to Python 2.
We are also making available Debian packages which will run Synapse on
Python 3. You can switch to these packages with ``apt-get install
matrix-synapse-py3``, however, please read `debian/NEWS
<https://github.com/matrix-org/synapse/blob/release-v0.34.0/debian/NEWS>`_
before doing so. The existing ``matrix-synapse`` packages will continue to
use Python 2 for the time being.
2. This release removes the ``riot.im`` from the default list of trusted
identity servers.
If ``riot.im`` is in your homeserver's list of
``trusted_third_party_id_servers``, you should remove it. It was added in
case a hypothetical future identity server was put there. If you don't
remove it, users may be unable to deactivate their accounts.
3. This release no longer installs the (unmaintained) Matrix Console web client
as part of the default installation. It is possible to re-enable it by
installing it separately and setting the ``web_client_location`` config
option, but please consider switching to another client.
Upgrading to v0.33.7
====================
This release removes the example email notification templates from
``res/templates`` (they are now internal to the python package). This should
only affect you if you (a) deploy your Synapse instance from a git checkout or
a github snapshot URL, and (b) have email notifications enabled.
If you have email notifications enabled, you should ensure that
``email.template_dir`` is either configured to point at a directory where you
have installed customised templates, or leave it unset to use the default
templates.
Upgrading to v0.27.3
====================
This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.
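
For instance, if Synapse was installed into a virtualenv at ``~/.synapse``
(adjust the path to your setup)::

    ~/.synapse/bin/pip install psutil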
Upgrading to v0.15.0
====================
If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.
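
As a rough sketch of the relevant ``homeserver.yaml`` options (check the
sample config for your version, as names may differ)::

    url_preview_enabled: true
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'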
Upgrading to v0.11.0
====================
This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
Upgrading to v0.9.0
===================
Application services have had a breaking API change in this version.
They can no longer register themselves with a home server using the AS HTTP API. This
decision was made because a compromised application service with free rein to register
any regex in effect grants full read/write access to the home server if a regex of ``.*``
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
big of a security risk to ignore, and so the ability to register with the HS remotely has
been removed.
It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::
app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
Where ``registration-01.yaml`` looks like::
url: <String> # e.g. "https://my.application.service.com"
as_token: <String>
hs_token: <String>
sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
namespaces:
users:
- exclusive: <Boolean>
regex: <String> # e.g. "@prefix_.*"
aliases:
- exclusive: <Boolean>
regex: <String>
rooms:
- exclusive: <Boolean>
regex: <String>
Upgrading to v0.8.0
===================
Servers which use captchas will need to add their public key to::
static/client/register/register_config.js
window.matrixRegistrationConfig = {
recaptcha_public_key: "YOUR_PUBLIC_KEY"
};
This is required in order to support registration fallback (typically used on
mobile devices).
Upgrading to v0.7.0
===================
New dependencies are:
- pydenticon
- simplejson
- syutil
- matrix-angular-sdk
To pull in these dependencies in a virtual env, run::
python synapse/python_dependencies.py | xargs -n 1 pip install
Upgrading to v0.6.0
===================
To pull in new dependencies, run::
python setup.py develop --user
This update includes a change to the database schema. To upgrade you first need
to upgrade the database by running::
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
Where `<db>` is the location of the database, `<server_name>` is the
server name as specified in the synapse configuration, and `<signing_key>` is
the location of the signing key as specified in the synapse configuration.
This may take some time to complete. Failures of signatures and content hashes
can safely be ignored.
Upgrading to v0.5.1
===================
Depending on precisely when you installed v0.5.0 you may have ended up with
a stale release of the reference matrix webclient installed as a python module.
To uninstall it and ensure you are depending on the latest module, please run::
$ pip uninstall syweb
Upgrading to v0.5.0
===================
The webclient has been split out into a separate repository/package in this
release. Before you restart your homeserver you will need to pull in the
webclient package by running::
python setup.py develop --user
This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.
If you would like to keep your history, please take a copy of your database
file and ask for help in #matrix:matrix.org. The upgrade process is,
unfortunately, non-trivial and requires human intervention to resolve any
resulting conflicts during the upgrade process.
Before running the command, the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.:
./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.
Upgrading to v0.4.0
===================
This release needs an updated syutil version. Run::
python setup.py develop
You will also need to upgrade your configuration as the signing key format has
changed. Run::
python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
Upgrading to v0.3.0
===================
The registration API now closely matches the login API. This introduces a bit
more back-and-forth between the HS and the client, but improves
the overall flexibility of the API. You can now GET on /register to retrieve a list
of valid registration flows. Upon choosing one, they are submitted in the same
way as login, e.g::
{
    "type": "m.login.password",
    "user": "foo",
    "password": "bar"
}
The default HS supports 2 flows, with and without Identity Server email
authentication. Enabling captcha on the HS will add in an extra step to all
flows: ``m.login.recaptcha`` which must be completed before you can transition
to the next stage. There is a new login type: ``m.login.email.identity`` which
contains the ``threepidCreds`` key which was previously sent in the original
register request. For more information on this, see the specification.
Web Client
----------
The VoIP specification has changed between v0.2.0 and v0.3.0. Users should
refresh any browser tabs to get the latest web client code. Users on
v0.2.0 of the web client will not be able to call those on v0.3.0 and
vice versa.
Upgrading to v0.2.0
===================
The home server now requires SSL config to be set up before it can run. To
automatically generate default config use::
$ python synapse/app/homeserver.py \
--server-name machine.my.domain.name \
--bind-port 8448 \
--config-path homeserver.config \
--generate-config
This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::
$ python synapse/app/homeserver.py --config-path homeserver.config
See the README.rst for more information.
Also note that some config options have been renamed, including:
- "host" to "server-name"
- "database" to "database-path"
- "port" to "bind-port" and "unsecure-port"
Upgrading to v0.0.1
===================
This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.
The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.
Before running the command, the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.:
./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.
On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.

View File

@@ -1,39 +0,0 @@
# Documentation for possible options in this file is at
# https://rust-lang.github.io/mdBook/format/config.html
[book]
title = "Synapse"
authors = ["The Matrix.org Foundation C.I.C."]
language = "en"
multilingual = false
# The directory that documentation files are stored in
src = "docs"
[build]
# Prevent markdown pages from being automatically generated when they're
# linked to in SUMMARY.md
create-missing = false
[output.html]
# The URL visitors will be directed to when they try to edit a page
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
# Remove the numbers that appear before each item in the sidebar, as they can
# get quite messy as we nest deeper
no-section-label = true
# The source code URL of the repository
git-repository-url = "https://github.com/matrix-org/synapse"
# The path that the docs are hosted on
site-url = "/synapse/"
# Additional HTML, JS, CSS that's injected into each page of the book.
# More information available in docs/website_files/README.md
additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"

View File

@@ -1,23 +0,0 @@
# A build script for poetry that adds the rust extension.
import os
from typing import Any, Dict
from setuptools_rust import Binding, RustExtension
def build(setup_kwargs: Dict[str, Any]) -> None:
original_project_dir = os.path.dirname(os.path.realpath(__file__))
cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
extension = RustExtension(
target="synapse.synapse_rust",
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False

changelog.d/3902.feature Normal file
View File

@@ -0,0 +1 @@
Include m.room.encryption on invites by default

View File

@@ -15,8 +15,11 @@
# limitations under the License.
""" Starts a synapse client console. """
from twisted.internet import reactor, defer, threads
from http import TwistedHttpClient
import argparse
import binascii
import cmd
import getpass
import json
@@ -24,23 +27,21 @@ import shlex
import sys
import time
import urllib
from http import TwistedHttpClient
from typing import Optional
import urlparse
from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
import nacl.signing
import nacl.encoding
from signedjson.sign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
# TODO: The concept of trusted identity servers has been deprecated. This option and checks
# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]
TRUSTED_ID_SERVERS = [
'localhost:8001'
]
class SynapseCmd(cmd.Cmd):
"""Basic synapse command-line processor.
This processes commands from the user and calls the relevant HTTP methods.
@@ -57,7 +58,7 @@ class SynapseCmd(cmd.Cmd):
"token": token,
"verbose": "on",
"complete_usernames": "on",
"send_delivery_receipts": "on",
"send_delivery_receipts": "on"
}
self.path_prefix = "/_matrix/client/api/v1"
self.event_stream_token = "END"
@@ -92,7 +93,7 @@ class SynapseCmd(cmd.Cmd):
return self.config["user"].split(":")[1]
def do_config(self, line):
"""Show the config for this client: "config"
""" Show the config for this client: "config"
Edit a key value mapping: "config key value" e.g. "config token 1234"
Config variables:
user: The username to auth with.
@@ -108,7 +109,7 @@ class SynapseCmd(cmd.Cmd):
by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
"""
if len(line) == 0:
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
return
try:
@@ -118,11 +119,12 @@ class SynapseCmd(cmd.Cmd):
config_rules = [ # key, valid_values
("verbose", ["on", "off"]),
("complete_usernames", ["on", "off"]),
("send_delivery_receipts", ["on", "off"]),
("send_delivery_receipts", ["on", "off"])
]
for key, valid_vals in config_rules:
if key == args["key"] and args["val"] not in valid_vals:
print("%s value must be one of %s" % (args["key"], valid_vals))
print "%s value must be one of %s" % (args["key"],
valid_vals)
return
# toggle the http client verbosity
@@ -131,11 +133,11 @@ class SynapseCmd(cmd.Cmd):
# assign the new config
self.config[args["key"]] = args["val"]
print(json.dumps(self.config, indent=4))
print json.dumps(self.config, indent=4)
save_config(self.config)
except Exception as e:
print(e)
print e
def do_register(self, line):
"""Registers for a new account: "register <userid> <noupdate>"
@@ -151,32 +153,33 @@ class SynapseCmd(cmd.Cmd):
pwd = getpass.getpass("Type a password for this user: ")
pwd2 = getpass.getpass("Retype the password: ")
if pwd != pwd2 or len(pwd) == 0:
print("Password mismatch.")
print "Password mismatch."
pwd = None
else:
password = pwd
body = {"type": "m.login.password"}
body = {
"type": "m.login.password"
}
if "userid" in args:
body["user"] = args["userid"]
if password:
body["password"] = password
reactor.callFromThread(self._do_register, body, "noupdate" not in args)
reactor.callFromThread(self._do_register, body,
"noupdate" not in args)
@defer.inlineCallbacks
def _do_register(self, data, update_config):
# check the registration flows
url = self._url() + "/register"
json_res = yield self.http_client.do_request("GET", url)
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
passwordFlow = None
for flow in json_res["flows"]:
if flow["type"] == "m.login.recaptcha" or (
"stages" in flow and "m.login.recaptcha" in flow["stages"]
):
print("Unable to register: Home server requires captcha.")
if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
print "Unable to register: Home server requires captcha."
return
if flow["type"] == "m.login.password" and "stages" not in flow:
passwordFlow = flow
@@ -186,7 +189,7 @@ class SynapseCmd(cmd.Cmd):
return
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
if update_config and "user_id" in json_res:
self.config["user"] = json_res["user_id"]
self.config["token"] = json_res["access_token"]
@@ -198,7 +201,9 @@ class SynapseCmd(cmd.Cmd):
"""
try:
args = self._parse(line, ["user_id"], force_keys=True)
can_login = threads.blockingCallFromThread(reactor, self._check_can_login)
can_login = threads.blockingCallFromThread(
reactor,
self._check_can_login)
if can_login:
p = getpass.getpass("Enter your password: ")
user = args["user_id"]
@@ -206,25 +211,29 @@ class SynapseCmd(cmd.Cmd):
domain = self._domain()
if domain:
user = "@" + user + ":" + domain
reactor.callFromThread(self._do_login, user, p)
# print " got %s " % p
#print " got %s " % p
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_login(self, user, password):
path = "/login"
data = {"user": user, "password": password, "type": "m.login.password"}
data = {
"user": user,
"password": password,
"type": "m.login.password"
}
url = self._url() + path
json_res = yield self.http_client.do_request("POST", url, data=data)
print(json_res)
print json_res
if "access_token" in json_res:
self.config["user"] = user
self.config["token"] = json_res["access_token"]
save_config(self.config)
print("Login successful.")
print "Login successful."
@defer.inlineCallbacks
def _check_can_login(self):
@@ -233,19 +242,18 @@ class SynapseCmd(cmd.Cmd):
# submitting!
url = self._url() + path
json_res = yield self.http_client.do_request("GET", url)
print(json_res)
print json_res
if "flows" not in json_res:
print("Failed to find any login flows.")
print "Failed to find any login flows."
defer.returnValue(False)
flow = json_res["flows"][0] # assume first is the one we want.
if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow:
flow = json_res["flows"][0] # assume first is the one we want.
if ("type" not in flow or "m.login.password" != flow["type"] or
"stages" in flow):
fallback_url = self._url() + "/login/fallback"
print(
"Unable to login via the command line client. Please visit "
"%s to login." % fallback_url
)
print ("Unable to login via the command line client. Please visit "
"%s to login." % fallback_url)
defer.returnValue(False)
defer.returnValue(True)
@@ -255,34 +263,21 @@ class SynapseCmd(cmd.Cmd):
<clientSecret> A string of characters generated when requesting an email that you'll supply in subsequent calls to identify yourself
<sendAttempt> The number of times the user has requested an email. Leave this the same between requests to retry the request at the transport level. Increment it to request that the email be sent again.
"""
args = self._parse(line, ["address", "clientSecret", "sendAttempt"])
args = self._parse(line, ['address', 'clientSecret', 'sendAttempt'])
postArgs = {
"email": args["address"],
"clientSecret": args["clientSecret"],
"sendAttempt": args["sendAttempt"],
}
postArgs = {'email': args['address'], 'clientSecret': args['clientSecret'], 'sendAttempt': args['sendAttempt']}
reactor.callFromThread(self._do_emailrequest, postArgs)
@defer.inlineCallbacks
def _do_emailrequest(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/requestToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/requestToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
if "sid" in json_res:
print("Token sent. Your session ID is %s" % (json_res["sid"]))
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
if 'sid' in json_res:
print "Token sent. Your session ID is %s" % (json_res['sid'])
def do_emailvalidate(self, line):
"""Validate and associate a third party ID
@@ -290,58 +285,39 @@ class SynapseCmd(cmd.Cmd):
<token> The token sent to your third party identifier address
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "token", "clientSecret"])
args = self._parse(line, ['sid', 'token', 'clientSecret'])
postArgs = {
"sid": args["sid"],
"token": args["token"],
"clientSecret": args["clientSecret"],
}
postArgs = { 'sid' : args['sid'], 'token' : args['token'], 'clientSecret': args['clientSecret'] }
reactor.callFromThread(self._do_emailvalidate, postArgs)
@defer.inlineCallbacks
def _do_emailvalidate(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/submitToken"
)
url = self._identityServerUrl()+"/_matrix/identity/api/v1/validate/email/submitToken"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_3pidbind(self, line):
"""Validate and associate a third party ID
<sid> The session ID (sid) given to you in the response to requestToken
<clientSecret> The same clientSecret you supplied in requestToken
"""
args = self._parse(line, ["sid", "clientSecret"])
args = self._parse(line, ['sid', 'clientSecret'])
postArgs = {"sid": args["sid"], "clientSecret": args["clientSecret"]}
postArgs["mxid"] = self.config["user"]
postArgs = { 'sid' : args['sid'], 'clientSecret': args['clientSecret'] }
postArgs['mxid'] = self.config["user"]
reactor.callFromThread(self._do_3pidbind, postArgs)
@defer.inlineCallbacks
def _do_3pidbind(self, args):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
url = self._identityServerUrl()+"/_matrix/identity/api/v1/3pid/bind"
json_res = yield self.http_client.do_request(
"POST",
url,
data=urllib.urlencode(args),
jsonreq=False,
headers={"Content-Type": ["application/x-www-form-urlencoded"]},
)
print(json_res)
json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
headers={'Content-Type': ['application/x-www-form-urlencoded']})
print json_res
def do_join(self, line):
"""Joins a room: "join <roomid>" """
@@ -349,7 +325,7 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "join", self._usr())
except Exception as e:
print(e)
print e
def do_joinalias(self, line):
try:
@@ -357,34 +333,36 @@ class SynapseCmd(cmd.Cmd):
path = "/join/%s" % urllib.quote(args["roomname"])
reactor.callFromThread(self._run_and_pprint, "POST", path, {})
except Exception as e:
print(e)
print e
def do_topic(self, line):
""" "topic [set|get] <roomid> [<newtopic>]"
""""topic [set|get] <roomid> [<newtopic>]"
Set the topic for a room: topic set <roomid> <newtopic>
Get the topic for a room: topic get <roomid>
"""
try:
args = self._parse(line, ["action", "roomid", "topic"])
if "action" not in args or "roomid" not in args:
print("Must specify set|get and a room ID.")
print "Must specify set|get and a room ID."
return
if args["action"].lower() not in ["set", "get"]:
print("Must specify set|get, not %s" % args["action"])
print "Must specify set|get, not %s" % args["action"]
return
path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
if args["action"].lower() == "set":
if "topic" not in args:
print("Must specify a new topic.")
print "Must specify a new topic."
return
body = {"topic": args["topic"]}
body = {
"topic": args["topic"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body)
elif args["action"].lower() == "get":
reactor.callFromThread(self._run_and_pprint, "GET", path)
except Exception as e:
print(e)
print e
def do_invite(self, line):
"""Invite a user to a room: "invite <userid> <roomid>" """
@@ -395,66 +373,49 @@ class SynapseCmd(cmd.Cmd):
reactor.callFromThread(self._do_invite, args["roomid"], user_id)
except Exception as e:
print(e)
print e
@defer.inlineCallbacks
def _do_invite(self, roomid, userstring):
if not userstring.startswith("@") and self._is_on("complete_usernames"):
# TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
if (not userstring.startswith('@') and
self._is_on("complete_usernames")):
url = self._identityServerUrl()+"/_matrix/identity/api/v1/lookup"
json_res = yield self.http_client.do_request(
"GET", url, qparams={"medium": "email", "address": userstring}
)
json_res = yield self.http_client.do_request("GET", url, qparams={'medium':'email','address':userstring})
mxid = None
if "mxid" in json_res and "signatures" in json_res:
# TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/pubkey/ed25519"
)
if 'mxid' in json_res and 'signatures' in json_res:
url = self._identityServerUrl()+"/_matrix/identity/api/v1/pubkey/ed25519"
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
pubKey = decode_verify_key_bytes(
NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
)
if 'public_key' in pubKeyObj:
pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
else:
print("No public key found in pubkey response!")
print "No public key found in pubkey response!"
sigValid = False
if pubKey:
for signame in json_res["signatures"]:
for signame in json_res['signatures']:
if signame not in TRUSTED_ID_SERVERS:
print(
"Ignoring signature from untrusted server %s"
% (signame)
)
print "Ignoring signature from untrusted server %s" % (signame)
else:
try:
verify_signed_json(json_res, signame, pubKey)
sigValid = True
print(
"Mapping %s -> %s correctly signed by %s"
% (userstring, json_res["mxid"], signame)
)
print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
break
except SignatureVerifyException as e:
print("Invalid signature from %s" % (signame))
print(e)
print "Invalid signature from %s" % (signame)
print e
if sigValid:
print("Resolved 3pid %s to %s" % (userstring, json_res["mxid"]))
mxid = json_res["mxid"]
print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
mxid = json_res['mxid']
else:
print(
"Got association for %s but couldn't verify signature"
% (userstring)
)
print "Got association for %s but couldn't verify signature" % (userstring)
if not mxid:
mxid = "@" + userstring + ":" + self._domain()
@@ -467,17 +428,18 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["roomid"], force_keys=True)
self._do_membership_change(args["roomid"], "leave", self._usr())
except Exception as e:
print(e)
print e
def do_send(self, line):
"""Sends a message. "send <roomid> <body>" """
args = self._parse(line, ["roomid", "body"])
txn_id = "txn%s" % int(time.time())
path = "/rooms/%s/send/m.room.message/%s" % (
urllib.quote(args["roomid"]),
txn_id,
)
body_json = {"msgtype": "m.text", "body": args["body"]}
path = "/rooms/%s/send/m.room.message/%s" % (urllib.quote(args["roomid"]),
txn_id)
body_json = {
"msgtype": "m.text",
"body": args["body"]
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, body_json)
def do_list(self, line):
@@ -490,11 +452,11 @@ class SynapseCmd(cmd.Cmd):
"list messages <roomid> from=END&to=START&limit=3"
"""
args = self._parse(line, ["type", "roomid", "qp"])
if "type" not in args or "roomid" not in args:
print("Must specify type and room ID.")
if not "type" in args or not "roomid" in args:
print "Must specify type and room ID."
return
if args["type"] not in ["members", "messages"]:
print("Unrecognised type: %s" % args["type"])
print "Unrecognised type: %s" % args["type"]
return
room_id = args["roomid"]
path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -505,11 +467,12 @@ class SynapseCmd(cmd.Cmd):
try:
key_value = key_value_str.split("=")
qp[key_value[0]] = key_value[1]
except Exception:
print("Bad query param: %s" % key_value)
except:
print "Bad query param: %s" % key_value
return
reactor.callFromThread(self._run_and_pprint, "GET", path, query_params=qp)
reactor.callFromThread(self._run_and_pprint, "GET", path,
query_params=qp)
def do_create(self, line):
"""Creates a room.
@@ -545,22 +508,14 @@ class SynapseCmd(cmd.Cmd):
args = self._parse(line, ["method", "path", "data"])
# sanity check
if "method" not in args or "path" not in args:
print("Must specify path and method.")
print "Must specify path and method."
return
args["method"] = args["method"].upper()
valid_methods = [
"PUT",
"GET",
"POST",
"DELETE",
"XPUT",
"XGET",
"XPOST",
"XDELETE",
]
valid_methods = ["PUT", "GET", "POST", "DELETE",
"XPUT", "XGET", "XPOST", "XDELETE"]
if args["method"] not in valid_methods:
print("Unsupported method: %s" % args["method"])
print "Unsupported method: %s" % args["method"]
return
if "data" not in args:
@@ -569,7 +524,7 @@ class SynapseCmd(cmd.Cmd):
try:
args["data"] = json.loads(args["data"])
except Exception as e:
print("Data is not valid JSON. %s" % e)
print "Data is not valid JSON. %s" % e
return
qp = {"access_token": self._tok()}
@@ -582,16 +537,13 @@ class SynapseCmd(cmd.Cmd):
parsed_url = urlparse.urlparse(args["path"])
qp.update(urlparse.parse_qs(parsed_url.query))
args["path"] = parsed_url.path
except Exception:
except:
pass
reactor.callFromThread(
self._run_and_pprint,
args["method"],
args["path"],
args["data"],
query_params=qp,
)
reactor.callFromThread(self._run_and_pprint, args["method"],
args["path"],
args["data"],
query_params=qp)
def do_stream(self, line):
"""Stream data from the server: "stream <longpoll timeout ms>" """
@@ -601,31 +553,26 @@ class SynapseCmd(cmd.Cmd):
try:
timeout = int(args["timeout"])
except ValueError:
print("Timeout must be in milliseconds.")
print "Timeout must be in milliseconds."
return
reactor.callFromThread(self._do_event_stream, timeout)
@defer.inlineCallbacks
def _do_event_stream(self, timeout):
res = yield defer.ensureDeferred(
self.http_client.get_json(
res = yield self.http_client.get_json(
self._url() + "/events",
{
"access_token": self._tok(),
"timeout": str(timeout),
"from": self.event_stream_token,
},
)
)
print(json.dumps(res, indent=4))
"from": self.event_stream_token
})
print json.dumps(res, indent=4)
if "chunk" in res:
for event in res["chunk"]:
if (
event["type"] == "m.room.message"
and self._is_on("send_delivery_receipts")
and event["user_id"] != self._usr()
): # not sent by us
if (event["type"] == "m.room.message" and
self._is_on("send_delivery_receipts") and
event["user_id"] != self._usr()): # not sent by us
self._send_receipt(event, "d")
# update the position in the stream
@@ -633,28 +580,18 @@ class SynapseCmd(cmd.Cmd):
self.event_stream_token = res["end"]
def _send_receipt(self, event, feedback_type):
path = "/rooms/%s/messages/%s/%s/feedback/%s/%s" % (
urllib.quote(event["room_id"]),
event["user_id"],
event["msg_id"],
self._usr(),
feedback_type,
)
path = ("/rooms/%s/messages/%s/%s/feedback/%s/%s" %
(urllib.quote(event["room_id"]), event["user_id"], event["msg_id"],
self._usr(), feedback_type))
data = {}
reactor.callFromThread(
self._run_and_pprint,
"PUT",
path,
data=data,
alt_text="Sent receipt for %s" % event["msg_id"],
)
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data,
alt_text="Sent receipt for %s" % event["msg_id"])
def _do_membership_change(self, roomid, membership, userid):
path = "/rooms/%s/state/m.room.member/%s" % (
urllib.quote(roomid),
urllib.quote(userid),
)
data = {"membership": membership}
path = "/rooms/%s/state/m.room.member/%s" % (urllib.quote(roomid), urllib.quote(userid))
data = {
"membership": membership
}
reactor.callFromThread(self._run_and_pprint, "PUT", path, data=data)
def do_displayname(self, line):
@@ -690,7 +627,7 @@ class SynapseCmd(cmd.Cmd):
self._do_presence_state(2, line)
def _parse(self, line, keys, force_keys=False):
"""Parses the given line.
""" Parses the given line.
Args:
line : The line to parse
@@ -707,21 +644,16 @@ class SynapseCmd(cmd.Cmd):
for i, arg in enumerate(line_args):
for config_key in self.config:
if ("$" + config_key) in arg:
arg = arg.replace("$" + config_key, self.config[config_key])
arg = arg.replace("$" + config_key,
self.config[config_key])
line_args[i] = arg
return dict(zip(keys, line_args))
@defer.inlineCallbacks
def _run_and_pprint(
self,
method,
path,
data=None,
query_params: Optional[dict] = None,
alt_text=None,
):
"""Runs an HTTP request and pretty prints the output.
def _run_and_pprint(self, method, path, data=None,
query_params={"access_token": None}, alt_text=None):
""" Runs an HTTP request and pretty prints the output.
Args:
method: HTTP method
@@ -729,37 +661,35 @@ class SynapseCmd(cmd.Cmd):
data: Raw JSON data if any
query_params: dict of query parameters to add to the url
"""
query_params = query_params or {"access_token": None}
url = self._url() + path
if "access_token" in query_params:
query_params["access_token"] = self._tok()
json_res = yield self.http_client.do_request(
method, url, data=data, qparams=query_params
)
json_res = yield self.http_client.do_request(method, url,
data=data,
qparams=query_params)
if alt_text:
print(alt_text)
print alt_text
else:
print(json.dumps(json_res, indent=4))
print json.dumps(json_res, indent=4)
def save_config(config):
with open(CONFIG_JSON, "w") as out:
with open(CONFIG_JSON, 'w') as out:
json.dump(config, out)
def main(server_url, identity_server_url, username, token, config_path):
print("Synapse command line client")
print("===========================")
print("Server: %s" % server_url)
print("Type 'help' to get started.")
print("Close this console with CTRL+C then CTRL+D.")
print "Synapse command line client"
print "==========================="
print "Server: %s" % server_url
print "Type 'help' to get started."
print "Close this console with CTRL+C then CTRL+D."
if not username or not token:
print("- 'register <username>' - Register an account")
print("- 'stream' - Connect to the event stream")
print("- 'create <roomid>' - Create a room")
print("- 'send <roomid> <message>' - Send a message")
print "- 'register <username>' - Register an account"
print "- 'stream' - Connect to the event stream"
print "- 'create <roomid>' - Create a room"
print "- 'send <roomid> <message>' - Send a message"
http_client = TwistedHttpClient()
# the command line client
@@ -769,14 +699,14 @@ def main(server_url, identity_server_url, username, token, config_path):
global CONFIG_JSON
CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
try:
with open(config_path, "r") as config:
with open(config_path, 'r') as config:
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
except Exception:
except:
pass
print("Loaded config from %s" % config_path)
except Exception:
print "Loaded config from %s" % config_path
except:
pass
# Twisted-specific: Runs the command processor in Twisted's event loop
@@ -786,37 +716,27 @@ def main(server_url, identity_server_url, username, token, config_path):
reactor.run()
if __name__ == "__main__":
if __name__ == '__main__':
parser = argparse.ArgumentParser("Starts a synapse client.")
parser.add_argument(
"-s",
"--server",
dest="server",
default="http://localhost:8008",
help="The URL of the home server to talk to.",
)
"-s", "--server", dest="server", default="http://localhost:8008",
help="The URL of the home server to talk to.")
parser.add_argument(
"-i",
"--identity-server",
dest="identityserver",
default="http://localhost:8090",
help="The URL of the identity server to talk to.",
)
"-i", "--identity-server", dest="identityserver", default="http://localhost:8090",
help="The URL of the identity server to talk to.")
parser.add_argument(
"-u", "--username", dest="username", help="Your username on the server."
)
parser.add_argument("-t", "--token", dest="token", help="Your access token.")
"-u", "--username", dest="username",
help="Your username on the server.")
parser.add_argument(
"-c",
"--config",
dest="config",
default=CONFIG_JSON,
help="The location of the config.json file to read from.",
)
"-t", "--token", dest="token",
help="Your access token.")
parser.add_argument(
"-c", "--config", dest="config", default=CONFIG_JSON,
help="The location of the config.json file to read from.")
args = parser.parse_args()
if not args.server:
print("You must supply a server URL to communicate with.")
print "You must supply a server URL to communicate with."
parser.print_help()
sys.exit(1)

View File

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,21 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from pprint import pformat
from typing import Optional
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import defer, reactor
from pprint import pformat
import json
import urllib
class HttpClient:
"""Interface for talking json over http"""
class HttpClient(object):
""" Interface for talking json over http
"""
def put_json(self, url, data):
"""Sends the specifed json data using PUT
""" Sends the specifed json data using PUT
Args:
url (str): The URL to PUT data to.
@@ -40,7 +42,7 @@ class HttpClient:
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
""" Gets some json from the given host homeserver and path
Args:
url (str): The URL to GET data from.
@@ -57,7 +59,7 @@ class HttpClient:
class TwistedHttpClient(HttpClient):
"""Wrapper around the twisted HTTP client api.
""" Wrapper around the twisted HTTP client api.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -70,7 +72,9 @@ class TwistedHttpClient(HttpClient):
@defer.inlineCallbacks
def put_json(self, url, data):
response = yield self._create_put_request(
url, data, headers_dict={"Content-Type": ["application/json"]}
url,
data,
headers_dict={"Content-Type": ["application/json"]}
)
body = yield readBody(response)
defer.returnValue((response.code, body))
@@ -85,46 +89,45 @@ class TwistedHttpClient(HttpClient):
body = yield readBody(response)
defer.returnValue(json.loads(body))
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a PUT request"""
headers_dict = headers_dict or {}
def _create_put_request(self, url, json_data, headers_dict={}):
""" Wrapper of _create_request to issue a PUT request
"""
if "Content-Type" not in headers_dict:
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
raise defer.error(
RuntimeError("Must include Content-Type header for PUTs"))
return self._create_request(
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
"PUT",
url,
producer=_JsonProducer(json_data),
headers_dict=headers_dict
)
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
"""Wrapper of _create_request to issue a GET request"""
return self._create_request("GET", url, headers_dict=headers_dict or {})
def _create_get_request(self, url, headers_dict={}):
""" Wrapper of _create_request to issue a GET request
"""
return self._create_request(
"GET",
url,
headers_dict=headers_dict
)
@defer.inlineCallbacks
def do_request(
self,
method,
url,
data=None,
qparams=None,
jsonreq=True,
headers: Optional[dict] = None,
):
headers = headers or {}
def do_request(self, method, url, data=None, qparams=None, jsonreq=True, headers={}):
if qparams:
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
if jsonreq:
prod = _JsonProducer(data)
headers["Content-Type"] = ["application/json"]
headers['Content-Type'] = ["application/json"];
else:
prod = _RawProducer(data)
if method in ["POST", "PUT"]:
response = yield self._create_request(
method, url, producer=prod, headers_dict=headers
)
response = yield self._create_request(method, url,
producer=prod,
headers_dict=headers)
else:
response = yield self._create_request(method, url)
@@ -132,33 +135,33 @@ class TwistedHttpClient(HttpClient):
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def _create_request(
self, method, url, producer=None, headers_dict: Optional[dict] = None
):
"""Creates and sends a request to the given url"""
headers_dict = headers_dict or {}
def _create_request(self, method, url, producer=None, headers_dict={}):
""" Creates and sends a request to the given url
"""
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
retries_left = 5
print("%s to %s with headers %s" % (method, url, headers_dict))
print "%s to %s with headers %s" % (method, url, headers_dict)
if self.verbose and producer:
if "password" in producer.data:
temp = producer.data["password"]
producer.data["password"] = "[REDACTED]"
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
producer.data["password"] = temp
else:
print(json.dumps(producer.data, indent=4))
print json.dumps(producer.data, indent=4)
while True:
try:
response = yield self.agent.request(
method, url.encode("UTF8"), Headers(headers_dict), producer
method,
url.encode("UTF8"),
Headers(headers_dict),
producer
)
break
except Exception as e:
print("uh oh: %s" % e)
print "uh oh: %s" % e
if retries_left:
yield self.sleep(2 ** (5 - retries_left))
retries_left -= 1
@@ -166,8 +169,8 @@ class TwistedHttpClient(HttpClient):
raise e
if self.verbose:
print("Status %s %s" % (response.code, response.phrase))
print(pformat(list(response.headers.getAllRawHeaders())))
print "Status %s %s" % (response.code, response.phrase)
print pformat(list(response.headers.getAllRawHeaders()))
defer.returnValue(response)
def sleep(self, seconds):
@@ -175,8 +178,7 @@ class TwistedHttpClient(HttpClient):
reactor.callLater(seconds, d.callback, seconds)
return d
class _RawProducer:
class _RawProducer(object):
def __init__(self, data):
self.data = data
self.body = data
@@ -192,10 +194,9 @@ class _RawProducer:
def stopProducing(self):
pass
class _JsonProducer:
"""Used by the twisted http client to create the HTTP body from json"""
class _JsonProducer(object):
""" Used by the twisted http client to create the HTTP body from json
"""
def __init__(self, jsn):
self.data = jsn
self.body = json.dumps(jsn).encode("utf8")

View File

@@ -1,26 +1,35 @@
# Synapse Docker
### Configuration
### Automated configuration
It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.
Read the section about environment variables and set at least the mandatory variables,
then run the server:
```
docker-compose up -d
```
If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.
### Manual configuration
A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your use case.
Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration.
To generate a fresh `homeserver.yaml`, you can use the `generate` command.
(See the [documentation](../../docker/README.md#generating-a-configuration-file)
for more information.) You will need to specify appropriate values for at least the
`SYNAPSE_SERVER_NAME` and `SYNAPSE_REPORT_STATS` environment variables. For example:
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:
```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host -e SYNAPSE_REPORT_STATS=yes synapse generate
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```
(This will also generate necessary signing keys.)
Then, customize your configuration and run the server:
```

View File

@@ -14,9 +14,12 @@ services:
# failure
restart: unless-stopped
# See the readme for a full documentation of the environment settings
# NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite
environment:
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
- SYNAPSE_SERVER_NAME=my.matrix.host
- SYNAPSE_REPORT_STATS=no
- SYNAPSE_ENABLE_REGISTRATION=yes
- SYNAPSE_LOG_LEVEL=INFO
- POSTGRES_PASSWORD=changeme
volumes:
# You may either store all the files in a local folder
- ./files:/data
@@ -32,33 +35,16 @@ services:
- 8448:8448/tcp
# ... or use a reverse proxy, here is an example for traefik:
labels:
# The following lines are valid for Traefik version 1.x:
- traefik.enable=true
- traefik.frontend.rule=Host:my.matrix.Host
- traefik.port=8008
# Alternatively, for Traefik version 2.0:
- traefik.enable=true
- traefik.http.routers.http-synapse.entryPoints=http
- traefik.http.routers.http-synapse.rule=Host(`my.matrix.host`)
- traefik.http.middlewares.https_redirect.redirectscheme.scheme=https
- traefik.http.middlewares.https_redirect.redirectscheme.permanent=true
- traefik.http.routers.http-synapse.middlewares=https_redirect
- traefik.http.routers.https-synapse.entryPoints=https
- traefik.http.routers.https-synapse.rule=Host(`my.matrix.host`)
- traefik.http.routers.https-synapse.service=synapse
- traefik.http.routers.https-synapse.tls=true
- traefik.http.services.synapse.loadbalancer.server.port=8008
- traefik.http.routers.https-synapse.tls.certResolver=le-ssl
db:
image: docker.io/postgres:12-alpine
image: docker.io/postgres:10-alpine
# Change that password, of course!
environment:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
- ./schemas:/var/lib/postgresql/data

View File

@@ -1,111 +0,0 @@
# Setting up Synapse with Workers using Docker Compose
This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
Example worker configuration files can be found [here](workers).
All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
An example Docker Compose file can be found [here](docker-compose.yaml).
## Worker Service Examples in Docker Compose
In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` (or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`).
### Generic Worker Example
```yaml
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
```
### Federation Sender Example
Please note: the federation sender does not receive REST API calls, so no exposed ports are required.
```yaml
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse
```
## `homeserver.yaml` Configuration
### Enable Redis
Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
```yaml
redis:
enabled: true
host: redis
port: 6379
# password: <secret_password>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
### Add a replication Listener
Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
```yaml
listeners:
# Other listeners
- port: 9093
type: http
resources:
- names: [replication]
```
This listener is used by the workers for replication and is referred to in worker config files using the following settings:
```yaml
worker_replication_host: synapse
worker_replication_http_port: 9093
```
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
```yaml
# This will disable federation sending on the main Synapse instance
send_federation: false
federation_sender_instances:
- synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
```
## Other Worker types
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.

View File

@@ -1,77 +0,0 @@
networks:
backend:
services:
postgres:
image: postgres:latest
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
networks:
- backend
environment:
POSTGRES_DB: synapse
POSTGRES_USER: synapse_user
POSTGRES_PASSWORD: postgres
POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
redis:
image: redis:latest
restart: unless-stopped
networks:
- backend
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
volumes:
- ${VOLUME_PATH}/data:/data:rw
ports:
- 8008:8008
networks:
- backend
environment:
SYNAPSE_CONFIG_DIR: /data
SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
depends_on:
- postgres
synapse-generic-worker-1:
image: matrixdotorg/synapse:latest
container_name: synapse-generic-worker-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
healthcheck:
test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
start_period: "5s"
interval: "15s"
timeout: "5s"
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.generic_worker
# Expose port if required so your reverse proxy can send requests to this worker
# Port configuration will depend on how the http listener is defined in the worker configuration file
ports:
- 8081:8081
depends_on:
- synapse
synapse-federation-sender-1:
image: matrixdotorg/synapse:latest
container_name: synapse-federation-sender-1
restart: unless-stopped
entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
healthcheck:
disable: true
networks:
- backend
volumes:
- ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
environment:
SYNAPSE_WORKER: synapse.app.federation_sender
depends_on:
- synapse

View File

@@ -1,8 +0,0 @@
worker_app: synapse.app.federation_sender
worker_name: synapse-federation-sender-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_log_config: /data/federation_sender.log.config

View File

@@ -1,15 +0,0 @@
worker_app: synapse.app.generic_worker
worker_name: synapse-generic-worker-1
# The replication listener on the main synapse process.
worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8081
x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /data/worker.log.config

View File

@@ -1,7 +1,7 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.
version: 1
@@ -12,14 +12,13 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
# example output to console
console:
class: logging.StreamHandler
formatter: fmt
filters: [context]
# example output to file - to enable, edit 'root' config below.
@@ -30,12 +29,12 @@ handlers:
maxBytes: 100000000
backupCount: 3
filters: [context]
encoding: utf8
root:
level: INFO
handlers: [console] # to use file handler instead, switch to [file]
loggers:
synapse:
level: INFO

View File

@@ -0,0 +1,168 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import curses.wrapper
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO():
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ''
self.stdscr = stdscr
self.logLine = ''
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
""" We want to select on FD 0 """
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
""" add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
""" method for redisplaying lines
based on internal list of lines """
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index],
curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0,
text + ' ' * (self.cols - len(text)),
curses.A_STANDOUT)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0,
text + ' ' * (self.cols - len(text)),
curses.A_STANDOUT)
def doRead(self):
""" Input is ready! """
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ''
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(self.rows - 1, 0,
self.searchText + (' ' * (
self.cols - len(self.searchText) - 2)))
self.paintStatus(self.statusText + ' %d' % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
""" clean up """
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
class Callback(object):
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == '__main__':
curses.wrapper(main)

View File

@@ -0,0 +1,394 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is an example of using the server to server implementation to do a
basic chat style thing. It accepts commands from stdin and outputs to stdout.
It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
the address of the remote home server to hit.
Usage:
python test_messaging.py <port>
Currently assumes the local address is localhost:<port>
"""
from synapse.federation import (
ReplicationHandler
)
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
from synapse.app.homeserver import SynapseHomeServer
#from synapse.util.logutils import log_function
from twisted.internet import reactor, defer
from twisted.python import log
import argparse
import json
import logging
import os
import re
import cursesio
import curses.wrapper
logger = logging.getLogger("example")
def excpetion_errback(failure):
logging.exception(failure)
class InputOutput(object):
""" This is responsible for basic I/O so that a user can interact with
the example app.
"""
def __init__(self, screen, user):
self.screen = screen
self.user = user
def set_home_server(self, server):
self.server = server
def on_line(self, line):
""" This is where we process commands.
"""
try:
m = re.match("^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
room_name, = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
#self.print_line("OK.")
return
m = re.match("^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
self.print_line("%s invited to %s" % (invitee, room_name))
self.server.invite_to_room(room_name, self.user, invitee)
#self.print_line("OK.")
return
m = re.match("^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
self.print_line("%s send to %s" % (self.user, room_name))
self.server.send_message(room_name, self.user, body)
#self.print_line("OK.")
return
m = re.match("^backfill (\S+)$", line)
if m:
# we want to backfill a room
room_name, = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return
self.print_line("Unrecognized command")
except Exception as e:
logger.exception(e)
def print_line(self, text):
self.screen.print_line(text)
def print_log(self, text):
self.screen.print_log(text)
class IOLoggerHandler(logging.Handler):
def __init__(self, io):
logging.Handler.__init__(self)
self.io = io
def emit(self, record):
if record.levelno < logging.WARN:
return
msg = self.format(record)
self.io.print_log(msg)
class Room(object):
""" Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
def __init__(self, room_name):
self.room_name = room_name
self.invited = set()
self.participants = set()
self.servers = set()
self.oldest_server = None
self.have_got_metadata = False
def add_participant(self, participant):
""" Someone has joined the room
"""
self.participants.add(participant)
self.invited.discard(participant)
server = origin_from_ucid(participant)
self.servers.add(server)
if not self.oldest_server:
self.oldest_server = server
def add_invited(self, invitee):
""" Someone has been invited to the room
"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
""" A very basic home server implentation that allows people to join a
room and then invite other people.
"""
def __init__(self, server_name, replication_layer, output):
self.server_name = server_name
self.replication_layer = replication_layer
self.replication_layer.set_handler(self)
self.joined_rooms = {}
self.output = output
def on_receive_pdu(self, pdu):
""" We just received a PDU
"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
self._on_message(pdu)
elif pdu_type == "sy.room.member" and "membership" in pdu.content:
if pdu.content["membership"] == "join":
self._on_join(pdu.context, pdu.state_key)
elif pdu.content["membership"] == "invite":
self._on_invite(pdu.origin, pdu.context, pdu.state_key)
else:
self.output.print_line("#%s (unrec) %s = %s" %
(pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
#def on_state_change(self, pdu):
##self.output.print_line("#%s (state) %s *** %s" %
##(pdu.context, pdu.state_key, pdu.pdu_type)
##)
#if "joinee" in pdu.content:
#self._on_join(pdu.context, pdu.content["joinee"])
#elif "invitee" in pdu.content:
#self._on_invite(pdu.origin, pdu.context, pdu.content["invitee"])
def _on_message(self, pdu):
""" We received a message
"""
self.output.print_line("#%s %s %s" %
(pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
""" Someone has joined a room, either a remote user or a local user
"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" %
(context, joinee, "*** JOINED")
)
def _on_invite(self, origin, context, invitee):
""" Someone has been invited
"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
self.output.print_line("#%s %s %s" %
(context, invitee, "*** INVITED")
)
if not room.have_got_metadata and origin is not self.server_name:
logger.debug("Get room state")
self.replication_layer.get_state_for_context(origin, context)
room.have_got_metadata = True
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
""" Send a message to a room!
"""
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
pdu_type="sy.room.message",
content={"sender": sender, "body": body},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
""" Join a room!
"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
try:
pdu = Pdu.create_new(
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
yield self.replication_layer.send_pdu(pdu)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
""" Invite someone to a room!
"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
is_state=True,
pdu_type="sy.room.member",
state_key=invitee,
content={"membership": "invite"},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
def backfill(self, room_name, limit=5):
room = self.joined_rooms.get(room_name)
if not room:
return
dest = room.oldest_server
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return [i for i in self.joined_rooms.setdefault(room_name,).servers]
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
def get_servers_for_context(self, context):
return defer.succeed(
self.joined_rooms.setdefault(context, Room(context)).servers
)
def main(stdscr):
parser = argparse.ArgumentParser()
parser.add_argument('user', type=str)
parser.add_argument('-v', '--verbose', action='count')
args = parser.parse_args()
user = args.user
server_name = origin_from_ucid(user)
## Set up logging ##
root_logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(lineno)d - '
'%(levelname)s - %(message)s')
if not os.path.exists("logs"):
os.makedirs("logs")
fh = logging.FileHandler("logs/%s" % user)
fh.setFormatter(formatter)
root_logger.addHandler(fh)
root_logger.setLevel(logging.DEBUG)
# Hack: The only way to get it to stop logging to sys.stderr :(
log.theLogPublisher.observers = []
observer = log.PythonLoggingObserver()
observer.start()
## Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
curses_stdio.set_callback(input_output)
app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
replication = app_hs.get_replication_layer()
hs = HomeServer(server_name, replication, curses_stdio)
input_output.set_home_server(hs)
## Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
## Start! ##
try:
port = int(server_name.split(":")[1])
except:
port = 12345
app_hs.get_http_server().start_listening(port)
reactor.addReader(curses_stdio)
reactor.run()
if __name__ == "__main__":
curses.wrapper(main)

View File

@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. [contrib/prometheus](../prometheus)
3. Set up additional recording rules

File diff suppressed because it is too large.

View File

@@ -12,30 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cgi
import datetime
import json
import urllib.request
from typing import List
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
import urllib2
def make_name(pdu_id: str, origin: str) -> str:
return f"{pdu_id}@{origin}"
def make_name(pdu_id, origin):
return "%s@%s" % (pdu_id, origin)
def make_graph(pdus: List[dict], filename_prefix: str) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by querying a homeserver.
"""
def make_graph(pdus, room, filename_prefix):
pdu_map = {}
node_map = {}
origins = set()
colors = {"red", "green", "blue", "yellow", "purple"}
colors = set(("red", "green", "blue", "yellow", "purple"))
for pdu in pdus:
origins.add(pdu.get("origin"))
@@ -51,8 +47,8 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
try:
c = colors.pop()
color_map[o] = c
except Exception:
print("Run out of colours!")
except:
print "Run out of colours!"
color_map[o] = "black"
graph = pydot.Dot(graph_name="Test")
@@ -61,9 +57,9 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
name = make_name(pdu.get("pdu_id"), pdu.get("origin"))
pdu_map[name] = pdu
t = datetime.datetime.fromtimestamp(float(pdu["ts"]) / 1000).strftime(
"%Y-%m-%d %H:%M:%S,%f"
)
t = datetime.datetime.fromtimestamp(
float(pdu["ts"]) / 1000
).strftime('%Y-%m-%d %H:%M:%S,%f')
label = (
"<"
@@ -83,7 +79,11 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
"depth": pdu.get("depth"),
}
node = pydot.Node(name=name, label=label, color=color_map[pdu.get("origin")])
node = pydot.Node(
name=name,
label=label,
color=color_map[pdu.get("origin")]
)
node_map[name] = node
graph.add_node(node)
@@ -93,7 +93,7 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
end_name = make_name(i, o)
if end_name not in node_map:
print("%s not in nodes" % end_name)
print "%s not in nodes" % end_name
continue
edge = pydot.Edge(node_map[start_name], node_map[end_name])
@@ -107,19 +107,20 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
if prev_state_name in node_map:
state_edge = pydot.Edge(
node_map[start_name], node_map[prev_state_name], style="dotted"
node_map[start_name], node_map[prev_state_name],
style='dotted'
)
graph.add_edge(state_edge)
graph.write("%s.dot" % filename_prefix, format="raw", prog="dot")
# graph.write_png("%s.png" % filename_prefix, prog='dot')
graph.write_svg("%s.svg" % filename_prefix, prog="dot")
graph.write('%s.dot' % filename_prefix, format='raw', prog='dot')
# graph.write_png("%s.png" % filename_prefix, prog='dot')
graph.write_svg("%s.svg" % filename_prefix, prog='dot')
def get_pdus(host: str, room: str) -> List[dict]:
def get_pdus(host, room):
transaction = json.loads(
urllib.request.urlopen(
f"http://{host}/_matrix/federation/v1/context/{room}/"
urllib2.urlopen(
"http://%s/_matrix/federation/v1/context/%s/" % (host, room)
).read()
)
@@ -129,14 +130,15 @@ def get_pdus(host: str, room: str) -> List[dict]:
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(
"-p", "--prefix", dest="prefix", help="String to prefix output files with"
"-p", "--prefix", dest="prefix",
help="String to prefix output files with"
)
parser.add_argument("host")
parser.add_argument("room")
parser.add_argument('host')
parser.add_argument('room')
args = parser.parse_args()
@@ -146,4 +148,4 @@ if __name__ == "__main__":
pdus = get_pdus(host, room)
make_graph(pdus, prefix)
make_graph(pdus, room, prefix)
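The rendering half of this script is plain pydot; a minimal, self-contained sketch of the node/edge/write pattern it uses (the names are illustrative, and Graphviz must be installed for the SVG step):

import pydot

graph = pydot.Dot(graph_name="Test")
a = pydot.Node(name="pdu1@example.org", label="pdu1")
b = pydot.Node(name="pdu2@example.org", label="pdu2")
graph.add_node(a)
graph.add_node(b)
graph.add_edge(pydot.Edge(a, b))
# Same output calls as the script above: raw dot, then SVG via graphviz.
graph.write("example.dot", format="raw", prog="dot")
graph.write_svg("example.svg", prog="dot")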

View File

@@ -13,32 +13,22 @@
# limitations under the License.
import argparse
import datetime
import html
import json
import sqlite3
import pydot
import cgi
import json
import datetime
import argparse
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading from a Synapse SQLite database.
"""
def make_graph(db_name, room_id, file_prefix, limit):
conn = sqlite3.connect(db_name)
sql = "SELECT room_version FROM rooms WHERE room_id = ?"
c = conn.execute(sql, (room_id,))
room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
sql = (
"SELECT json, internal_metadata FROM event_json as j "
"SELECT json FROM event_json as j "
"INNER JOIN events as e ON e.event_id = j.event_id "
"WHERE j.room_id = ?"
)
@@ -46,16 +36,16 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
args = [room_id]
if limit:
sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
sql += (
" ORDER BY topological_ordering DESC, stream_ordering DESC "
"LIMIT ?"
)
args.append(limit)
c = conn.execute(sql, args)
events = [
make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
for e in c.fetchall()
]
events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
events.sort(key=lambda e: e.depth)
@@ -66,8 +56,9 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
for event in events:
c = conn.execute(
"SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
(event.event_id,),
"SELECT state_group FROM event_to_state_groups "
"WHERE event_id = ?",
(event.event_id,)
)
res = c.fetchone()
@@ -78,7 +69,7 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
).strftime('%Y-%m-%d %H:%M:%S,%f')
content = json.dumps(unfreeze(event.get_dict()["content"]))
@@ -96,23 +87,29 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": html.escape(content, quote=True),
"content": cgi.escape(content, quote=True),
"time": t,
"depth": event.depth,
"state_group": state_group,
}
node = pydot.Node(name=event.event_id, label=label)
node = pydot.Node(
name=event.event_id,
label=label,
)
node_map[event.event_id] = node
graph.add_node(node)
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
except:
end_node = pydot.Node(
name=prev_id,
label="<<b>%s</b>>" % (prev_id,),
)
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -124,33 +121,36 @@ def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None
if len(event_ids) <= 1:
continue
cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
cluster = pydot.Cluster(
str(group),
label="<State Group: %s>" % (str(group),)
)
for event_id in event_ids:
cluster.add_node(node_map[event_id])
graph.add_subgraph(cluster)
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
graph.write_svg("%s.svg" % file_prefix, prog="dot")
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
graph.write_svg("%s.svg" % file_prefix, prog='dot')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by talking "
"to the given Synapse SQLite file to get the list of PDUs. \n"
"Requires pydot."
"to the given homeserver to get the list of PDUs. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output",
default="graph_output"
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("db")
parser.add_argument("room")
parser.add_argument(
"-l", "--limit",
help="Only retrieve the last N events.",
)
parser.add_argument('db')
parser.add_argument('room')
args = parser.parse_args()
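The key change in this script is constructing events with make_event_from_dict, which needs the room version. A small sketch of that lookup on its own, with a placeholder database path and room id:

import sqlite3

conn = sqlite3.connect("homeserver.db")  # placeholder path
row = conn.execute(
    "SELECT room_version FROM rooms WHERE room_id = ?",
    ("!someroom:example.org",),  # placeholder room id
).fetchone()
print("room version:", row[0] if row else "unknown")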

View File

@@ -12,44 +12,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import html
import json
import pydot
import cgi
import simplejson as json
import datetime
import argparse
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import make_event_from_dict
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
from six import string_types
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"""
Generate a dot and SVG file for a graph of events in the room based on the
topological ordering by reading line-delimited JSON from a file.
"""
print("Reading lines")
def make_graph(file_name, room_id, file_prefix, limit):
print "Reading lines"
with open(file_name) as f:
lines = f.readlines()
print("Read lines")
print "Read lines"
# Figure out the room version, assume the first line is the create event.
room_version = KNOWN_ROOM_VERSIONS[
json.loads(lines[0]).get("content", {}).get("room_version")
]
events = [FrozenEvent(json.loads(line)) for line in lines]
events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
print("Loaded events.")
print "Loaded events."
events.sort(key=lambda e: e.depth)
print("Sorted events")
print "Sorted events"
if limit:
events = events[-int(limit) :]
events = events[-int(limit):]
node_map = {}
@@ -58,32 +50,31 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
for event in events:
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime("%Y-%m-%d %H:%M:%S,%f")
).strftime('%Y-%m-%d %H:%M:%S,%f')
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
content = content.replace("\n", "<br/>\n")
print(content)
print content
content = []
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, str):
elif isinstance(value, string_types):
pass
else:
value = json.dumps(value)
content.append(
"<b>%s</b>: %s,"
% (
html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
"<b>%s</b>: %s," % (
cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
)
)
content = "<br/>\n".join(content)
print(content)
print content
label = (
"<"
@@ -103,19 +94,25 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
"depth": event.depth,
}
node = pydot.Node(name=event.event_id, label=label)
node = pydot.Node(
name=event.event_id,
label=label,
)
node_map[event.event_id] = node
graph.add_node(node)
print("Created Nodes")
print "Created Nodes"
for event in events:
for prev_id in event.prev_event_ids():
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except Exception:
end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
except:
end_node = pydot.Node(
name=prev_id,
label="<<b>%s</b>>" % (prev_id,),
)
node_map[prev_id] = end_node
graph.add_node(end_node)
@@ -123,33 +120,34 @@ def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
print("Created edges")
print "Created edges"
graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
print("Created Dot")
print "Created Dot"
graph.write_svg("%s.svg" % file_prefix, prog="dot")
print("Created svg")
graph.write_svg("%s.svg" % file_prefix, prog='dot')
print "Created svg"
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by reading "
"from a file with line deliminated events. \n"
"Requires pydot."
"from a file with line deliminated events. \n"
"Requires pydot."
)
parser.add_argument(
"-p",
"--prefix",
dest="prefix",
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output",
default="graph_output"
)
parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
parser.add_argument("event_file")
parser.add_argument(
"-l", "--limit",
help="Only retrieve the last N events.",
)
parser.add_argument('event_file')
parser.add_argument('room')
args = parser.parse_args()
make_graph(args.event_file, args.prefix, args.limit)
make_graph(args.event_file, args.room, args.prefix, args.limit)

View File

@@ -0,0 +1,260 @@
#!/usr/bin/env python
"""
This is an attempt at bridging matrix clients into a Jitsi meet room via Matrix
video call. It uses hard-coded xml strings over XMPP BOSH. It can display one
of the streams from the Jitsi bridge until the second lot of SDP comes down and
we set the remote SDP, at which point the stream ends. Our video never gets to
the bridge.
Requires:
npm install jquery jsdom
"""
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
import json
import urllib
import subprocess
import time
ACCESS_TOKEN = ""  # FIXME: set this to a Matrix access token for the bridge's user
MATRIXBASE = 'https://matrix.org/_matrix/client/api/v1/'
MYUSERNAME = '@davetest:matrix.org'
HTTPBIND = 'https://meet.jit.si/http-bind'
#HTTPBIND = 'https://jitsi.vuc.me/http-bind'
#ROOMNAME = "matrix"
ROOMNAME = "pibble"
HOST="guest.jit.si"
#HOST="jitsi.vuc.me"
TURNSERVER="turn.guest.jit.si"
#TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN="meet.jit.si"
#ROOMDOMAIN="conference.jitsi.vuc.me"
class TrivialMatrixClient:
def __init__(self, access_token):
self.token = None
self.access_token = access_token
def getEvent(self):
while True:
url = MATRIXBASE+'events?access_token='+self.access_token+"&timeout=60000"
if self.token:
url += "&from="+self.token
req = grequests.get(url)
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print "incoming from matrix",obj
if 'end' not in obj:
continue
self.token = obj['end']
if len(obj['chunk']):
return obj['chunk'][0]
def joinRoom(self, roomId):
url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
print url
headers={ 'Content-Type': 'application/json' }
req = grequests.post(url, headers=headers, data='{}')
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print "response: ",obj
def sendEvent(self, roomId, evType, event):
url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
print url
print json.dumps(event)
headers={ 'Content-Type': 'application/json' }
req = grequests.post(url, headers=headers, data=json.dumps(event))
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print "response: ",obj
xmppClients = {}
def matrixLoop():
while True:
ev = matrixCli.getEvent()
print ev
if ev['type'] == 'm.room.member':
print 'membership event'
if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
roomId = ev['room_id']
print "joining room %s" % (roomId)
matrixCli.joinRoom(roomId)
elif ev['type'] == 'm.room.message':
if ev['room_id'] in xmppClients:
print "already have a bridge for that user, ignoring"
continue
print "got message, connecting"
xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev['type'] == 'm.call.invite':
print "Incoming call"
#sdp = ev['content']['offer']['sdp']
#print "sdp: %s" % (sdp)
#xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
#gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev['type'] == 'm.call.answer':
print "Call answered"
sdp = ev['content']['answer']['sdp']
if ev['room_id'] not in xmppClients:
print "We didn't have a call for that room"
continue
# should probably check call ID too
xmppCli = xmppClients[ev['room_id']]
xmppCli.sendAnswer(sdp)
elif ev['type'] == 'm.call.hangup':
if ev['room_id'] in xmppClients:
xmppClients[ev['room_id']].stop()
del xmppClients[ev['room_id']]
class TrivialXmppClient:
def __init__(self, matrixRoom, userId):
self.rid = 0
self.matrixRoom = matrixRoom
self.userId = userId
self.running = True
def stop(self):
self.running = False
def nextRid(self):
self.rid += 1
return '%d' % (self.rid)
def sendIq(self, xml):
fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
#print "\t>>>%s" % (fullXml)
return self.xmppPoke(fullXml)
def xmppPoke(self, xml):
headers = {'Content-Type': 'application/xml'}
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
resps = grequests.map([req])
obj = BeautifulSoup(resps[0].content)
return obj
def sendAnswer(self, answer):
print "sdp from matrix client",answer
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
jingle, out_err = p.communicate(answer)
jingle = jingle % {
'tojid': self.callfrom,
'action': 'session-accept',
'initiator': self.callfrom,
'responder': self.jid,
'sid': self.callsid
}
print "answer jingle from sdp",jingle
res = self.sendIq(jingle)
print "reply from answer: ",res
self.ssrcs = {}
jingleSoup = BeautifulSoup(jingle)
for cont in jingleSoup.iq.jingle.findAll('content'):
if cont.description:
self.ssrcs[cont['name']] = cont.description['ssrc']
print "my ssrcs:",self.ssrcs
gevent.joinall([
gevent.spawn(self.advertiseSsrcs)
])
def advertiseSsrcs(self):
time.sleep(7)
print "SSRC spammer started"
while self.running:
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendrecv'/><source type='video' ssrc='%(vssrc)s' direction='sendrecv'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
res = self.sendIq(ssrcMsg)
print "reply from ssrc announce: ",res
time.sleep(10)
def xmppLoop(self):
self.matrixCallId = time.time()
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))
print res
self.sid = res.body['sid']
print "sid %s" % (self.sid)
res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))
res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
print res
self.jid = res.body.iq.bind.jid.string
print "jid: %s" % (self.jid)
self.shortJid = self.jid.split('-')[0]
res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")
#randomthing = res.body.iq['to']
#whatsitpart = randomthing.split('-')[0]
#print "other random bind thing: %s" % (randomthing)
# advertise presence to the jitsi room, with our nick
res = self.sendIq("<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>" % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId))
self.muc = {'users': []}
for p in res.body.findAll('presence'):
u = {}
u['shortJid'] = p['from'].split('/')[1]
if p.c and p.c.nick:
u['nick'] = p.c.nick.string
self.muc['users'].append(u)
print "muc: ",self.muc
# wait for stuff
while True:
print "waiting..."
res = self.sendIq("")
print "got from stream: ",res
if res.body.iq:
jingles = res.body.iq.findAll('jingle')
if len(jingles):
self.callfrom = res.body.iq['from']
self.handleInvite(jingles[0])
elif 'type' in res.body and res.body['type'] == 'terminate':
self.running = False
del xmppClients[self.matrixRoom]
return
def handleInvite(self, jingle):
self.initiator = jingle['initiator']
self.callsid = jingle['sid']
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print "raw jingle invite",str(jingle)
sdp, out_err = p.communicate(str(jingle))
print "transformed remote offer sdp",sdp
inviteEvent = {
'offer': {
'type': 'offer',
'sdp': sdp
},
'call_id': self.matrixCallId,
'version': 0,
'lifetime': 30000
}
matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)
matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
gevent.joinall([
gevent.spawn(matrixLoop)
])
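The bridge's event loop rests on the long-deprecated v1 /events long-poll. A standard-library sketch of the same request shape (the access token and homeserver base URL are placeholders, and this API has since been superseded by /sync):

import json
import urllib.request

BASE = "https://matrix.org/_matrix/client/api/v1/"
TOKEN = "YOUR_ACCESS_TOKEN"  # placeholder

def poll_events(from_token=None):
    # Blocks up to 60s server-side; 'end' becomes the next 'from',
    # 'chunk' holds any events that arrived in the meantime.
    url = BASE + "events?access_token=" + TOKEN + "&timeout=60000"
    if from_token:
        url += "&from=" + from_token
    obj = json.loads(urllib.request.urlopen(url).read())
    return obj.get("end"), obj.get("chunk", [])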

View File

@@ -0,0 +1,188 @@
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
index 9fbfff0..dc68077 100644
--- a/syweb/webclient/app/components/matrix/matrix-call.js
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
@@ -16,6 +16,45 @@ limitations under the License.
'use strict';
+
+function sendKeyframe(pc) {
+ console.log('sendkeyframe', pc.iceConnectionState);
+ if (pc.iceConnectionState !== 'connected') return; // safe...
+ pc.setRemoteDescription(
+ pc.remoteDescription,
+ function () {
+ pc.createAnswer(
+ function (modifiedAnswer) {
+ pc.setLocalDescription(
+ modifiedAnswer,
+ function () {
+ // noop
+ },
+ function (error) {
+ console.log('triggerKeyframe setLocalDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe createAnswer failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe setRemoteDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+}
+
+
+
+
+
+
+
var forAllVideoTracksOnStream = function(s, f) {
var tracks = s.getVideoTracks();
for (var i = 0; i < tracks.length; i++) {
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
}
// FIXME: we should prevent any calls from being placed or accepted before this has finished
- MatrixCall.getTurnServer();
+ //MatrixCall.getTurnServer();
MatrixCall.CALL_TIMEOUT = 60000;
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
pc.onaddstream = function(s) { self.onAddStream(s); };
+
+ var datachan = pc.createDataChannel('RTCDataChannel', {
+ reliable: false
+ });
+ console.log("data chan: "+datachan);
+ datachan.onopen = function() {
+ console.log("data channel open");
+ };
+ datachan.onmessage = function() {
+ console.log("data channel message");
+ };
+ pc.ondatachannel = function(event) {
+ console.log("have data channel");
+ event.channel.binaryType = 'blob';
+ };
+
return pc;
}
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
}, this.msg.lifetime - event.age);
};
+ MatrixCall.prototype.receivedInvite = function(event) {
+ console.log("Got second invite for call "+this.call_id);
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
+ };
+
+
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
// (because when getting the state of the room on load, events come in reverse order and
// we want to remember that a call has been hung up)
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': this.type == 'video'
- },
+ }
};
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
if (event.candidate) {
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
- this.sendCandidate(event.candidate);
- }
+ //this.sendCandidate(event.candidate);
+ } else {
+ console.log("have all candidates, sending answer");
+ var content = {
+ version: 0,
+ call_id: this.call_id,
+ answer: this.peerConn.localDescription
+ };
+ this.sendEventWithRetry('m.call.answer', content);
+ var self = this;
+ $rootScope.$apply(function() {
+ self.state = 'connecting';
+ });
+ }
}
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
console.log("Created answer: "+description);
var self = this;
this.peerConn.setLocalDescription(description, function() {
- var content = {
- version: 0,
- call_id: self.call_id,
- answer: self.peerConn.localDescription
- };
- self.sendEventWithRetry('m.call.answer', content);
- $rootScope.$apply(function() {
- self.state = 'connecting';
- });
}, function() { console.log("Error setting local description!"); } );
};
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
$rootScope.$apply(function() {
self.state = 'connected';
self.didConnect = true;
+ /*$timeout(function() {
+ sendKeyframe(self.peerConn);
+ }, 1000);*/
});
} else if (this.peerConn.iceConnectionState == 'failed') {
this.hangup('ice_failed');
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
console.log("Remote stream ended");
+ return;
var self = this;
$rootScope.$apply(function() {
self.state = 'ended';
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
index 55dbbf5..272fa27 100644
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
return;
}
+ // do we already have an entry for this call ID?
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
+ if (existingEntry) {
+ existingEntry.receivedInvite(msg);
+ return;
+ }
+
var call = undefined;
if (!isLive) {
// if this event wasn't live then this call may already be over
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
call.hangup();
}
} else {
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
}
} else if (event.type == 'm.call.answer') {
var call = matrixPhoneService.allCalls[msg.call_id];

View File

@@ -0,0 +1,712 @@
/* jshint -W117 */
// SDP STUFF
function SDP(sdp) {
this.media = sdp.split('\r\nm=');
for (var i = 1; i < this.media.length; i++) {
this.media[i] = 'm=' + this.media[i];
if (i != this.media.length - 1) {
this.media[i] += '\r\n';
}
}
this.session = this.media.shift() + '\r\n';
this.raw = this.session + this.media.join('');
}
exports.SDP = SDP;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
/**
* Returns map of MediaChannel mapped per channel idx.
*/
SDP.prototype.getMediaSsrcMap = function() {
var self = this;
var media_ssrcs = {};
for (var channelNum = 0; channelNum < self.media.length; channelNum++) {
var modified = true;
var tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
var channel = new MediaChannel(channelNum, type);
media_ssrcs[channelNum] = channel;
tmp.forEach(function (line) {
var linessrc = line.substring(7).split(' ')[0];
// allocate new ChannelSsrc
if(!channel.ssrcs[linessrc]) {
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
}
channel.ssrcs[linessrc].lines.push(line);
});
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
tmp.forEach(function(line){
var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
channel.ssrcGroups.push(ssrcGroup);
}
});
}
return media_ssrcs;
};
/**
* Returns <tt>true</tt> if this SDP contains given SSRC.
* @param ssrc the ssrc to check.
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
*/
SDP.prototype.containsSSRC = function(ssrc) {
var channels = this.getMediaSsrcMap();
var contains = false;
Object.keys(channels).forEach(function(chNumber){
var channel = channels[chNumber];
//console.log("Check", channel, ssrc);
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
contains = true;
}
});
return contains;
};
/**
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
* @param otherSdp the other SDP to check ssrc with.
*/
SDP.prototype.getNewMedia = function(otherSdp) {
// this could be useful in Array.prototype.
function arrayEquals(array) {
// if the other array is a falsy value, return
if (!array)
return false;
// compare lengths - can save a lot of time
if (this.length != array.length)
return false;
for (var i = 0, l=this.length; i < l; i++) {
// Check if we have nested arrays
if (this[i] instanceof Array && array[i] instanceof Array) {
// recurse into the nested arrays
if (!this[i].equals(array[i]))
return false;
}
else if (this[i] != array[i]) {
// Warning - two different object instances will never be equal: {x:20} != {x:20}
return false;
}
}
return true;
}
var myMedia = this.getMediaSsrcMap();
var othersMedia = otherSdp.getMediaSsrcMap();
var newMedia = {};
Object.keys(othersMedia).forEach(function(channelNum) {
var myChannel = myMedia[channelNum];
var othersChannel = othersMedia[channelNum];
if(!myChannel && othersChannel) {
// Add whole channel
newMedia[channelNum] = othersChannel;
return;
}
// Look for new ssrcs across the channel
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
// Allocate channel if we've found ssrc that doesn't exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
}
});
// Look for new ssrc groups across the channels
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
// try to match the other ssrc-group with an ssrc-group of ours
var matched = false;
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
var mySsrcGroup = myChannel.ssrcGroups[i];
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
matched = true;
break;
}
}
if (!matched) {
// Allocate channel if we've found an ssrc-group that doesn't
// exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
}
});
});
return newMedia;
};
// remove iSAC and CN from SDP
SDP.prototype.mangle = function () {
var i, j, mline, lines, rtpmap, newdesc;
for (i = 0; i < this.media.length; i++) {
lines = this.media[i].split('\r\n');
lines.pop(); // remove empty last element
mline = SDPUtil.parse_mline(lines.shift());
if (mline.media != 'audio')
continue;
newdesc = '';
mline.fmt.length = 0;
for (j = 0; j < lines.length; j++) {
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
continue;
mline.fmt.push(rtpmap.id);
newdesc += lines[j] + '\r\n';
} else {
newdesc += lines[j] + '\r\n';
}
}
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
this.media[i] += newdesc;
}
this.raw = this.session + this.media.join('');
};
// remove lines matching prefix from session section
SDP.prototype.removeSessionLines = function(prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.session, prefix);
lines.forEach(function(line) {
self.session = self.session.replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// remove lines matching prefix from a media section specified by mediaindex
// TODO: non-numeric mediaindex could match mid
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
lines.forEach(function(line) {
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// add content's to a jingle element
SDP.prototype.toJingle = function (elem, thecreator) {
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
var self = this;
// new bundle plan
if (SDPUtil.find_line(this.session, 'a=group:')) {
lines = SDPUtil.find_lines(this.session, 'a=group:');
for (i = 0; i < lines.length; i++) {
tmp = lines[i].split(' ');
var semantics = tmp.shift().substr(8);
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
for (j = 0; j < tmp.length; j++) {
elem.c('content', {name: tmp[j]}).up();
}
elem.up();
}
}
// old bundle plan, to be removed
var bundle = [];
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
bundle.shift();
}
for (i = 0; i < this.media.length; i++) {
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
if (!(mline.media === 'audio' ||
mline.media === 'video' ||
mline.media === 'application'))
{
continue;
}
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
} else {
ssrc = false;
}
elem.c('content', {creator: thecreator, name: mline.media});
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
// prefer identifier from a=mid if present
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
elem.attrs({ name: mid });
// old BUNDLE plan, to be removed
if (bundle.indexOf(mid) !== -1) {
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
bundle.splice(bundle.indexOf(mid), 1);
}
}
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
{
elem.c('description',
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
media: mline.media });
if (ssrc) {
elem.attrs({ssrc: ssrc});
}
for (j = 0; j < mline.fmt.length; j++) {
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
for (k = 0; k < tmp.length; k++) {
elem.c('parameter', tmp[k]).up();
}
}
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
elem.up();
}
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
elem.c('encryption', {required: 1});
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
crypto.forEach(function(line) {
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
});
elem.up(); // end of encryption
}
if (ssrc) {
// new style mapping
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
// FIXME: group by ssrc and support multiple different ssrcs
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
ssrclines.forEach(function(line) {
idx = line.indexOf(' ');
var linessrc = line.substr(0, idx).substr(7);
if (linessrc != ssrc) {
elem.up();
ssrc = linessrc;
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
}
var kv = line.substr(idx + 1);
elem.c('parameter');
if (kv.indexOf(':') == -1) {
elem.attrs({ name: kv });
} else {
elem.attrs({ name: kv.split(':', 2)[0] });
elem.attrs({ value: kv.split(':', 2)[1] });
}
elem.up();
});
elem.up();
// old proprietary mapping, to be removed at some point
tmp = SDPUtil.parse_ssrc(this.media[i]);
tmp.xmlns = 'http://estos.de/ns/ssrc';
tmp.ssrc = ssrc;
elem.c('ssrc', tmp).up(); // ssrc is part of description
// XEP-0339 handle ssrc-group attributes
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
ssrc_group_lines.forEach(function(line) {
idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
ssrcs.forEach(function(ssrc) {
elem.c('source', { ssrc: ssrc })
.up();
});
elem.up();
}
});
}
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
elem.c('rtcp-mux').up();
}
// XEP-0293 -- map a=rtcp-fb:*
this.RtcpFbToJingle(i, elem, '*');
// XEP-0294
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
for (j = 0; j < lines.length; j++) {
tmp = SDPUtil.parse_extmap(lines[j]);
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
uri: tmp.uri,
id: tmp.value });
if (tmp.hasOwnProperty('direction')) {
switch (tmp.direction) {
case 'sendonly':
elem.attrs({senders: 'responder'});
break;
case 'recvonly':
elem.attrs({senders: 'initiator'});
break;
case 'sendrecv':
elem.attrs({senders: 'both'});
break;
case 'inactive':
elem.attrs({senders: 'none'});
break;
}
}
// TODO: handle params
elem.up();
}
}
elem.up(); // end of description
}
// map ice-ufrag/pwd, dtls fingerprint, candidates
this.TransportToJingle(i, elem);
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
elem.attrs({senders: 'both'});
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
elem.attrs({senders: 'initiator'});
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
elem.attrs({senders: 'responder'});
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
elem.attrs({senders: 'none'});
}
if (mline.port == '0') {
// estos hack to reject an m-line
elem.attrs({senders: 'rejected'});
}
elem.up(); // end of content
}
elem.up();
return elem;
};
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
var i = mediaindex;
var tmp;
var self = this;
elem.c('transport');
// XEP-0343 DTLS/SCTP
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
{
var sctpmap = SDPUtil.find_line(
this.media[i], 'a=sctpmap:', self.session);
if (sctpmap)
{
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
elem.c('sctpmap',
{
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
number: sctpAttrs[0], /* SCTP port */
protocol: sctpAttrs[1], /* protocol */
});
// Optional stream count attribute
if (sctpAttrs.length > 2)
elem.attrs({ streams: sctpAttrs[2]});
elem.up();
}
}
// XEP-0320
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
fingerprints.forEach(function(line) {
tmp = SDPUtil.parse_fingerprint(line);
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
elem.c('fingerprint').t(tmp.fingerprint);
delete tmp.fingerprint;
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
if (line) {
tmp.setup = line.substr(8);
}
elem.attrs(tmp);
elem.up(); // end of fingerprint
});
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
if (tmp) {
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
elem.attrs(tmp);
// XEP-0176
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
lines.forEach(function (line) {
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
});
}
}
elem.up(); // end of transport
}
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
lines.forEach(function (line) {
var tmp = SDPUtil.parse_rtcpfb(line);
if (tmp.type == 'trr-int') {
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
elem.up();
} else {
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
if (tmp.params.length > 0) {
elem.attrs({'subtype': tmp.params[0]});
}
elem.up();
}
});
};
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
var media = '';
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
if (tmp.length) {
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
if (tmp.attr('value')) {
media += tmp.attr('value');
} else {
media += '0';
}
media += '\r\n';
}
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
tmp.each(function () {
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
if ($(this).attr('subtype')) {
media += ' ' + $(this).attr('subtype');
}
media += '\r\n';
});
return media;
};
// construct an SDP from a jingle stanza
SDP.prototype.fromJingle = function (jingle) {
var self = this;
this.raw = 'v=0\r\n' +
'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME
's=-\r\n' +
't=0 0\r\n';
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (contents.length > 0) {
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
}
});
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
// temporary namespace, not to be used. to be removed soon.
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (group.getAttribute('type') !== null && contents.length > 0) {
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
}
});
} else {
// for backward compability, to be removed soon
// assume all contents are in the same bundle group, can be improved upon later
var bundle = $(jingle).find('>content').filter(function (idx, content) {
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
return $(content).find('>bundle').length > 0;
}).map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (bundle.length) {
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
}
}
this.session = this.raw;
jingle.find('>content').each(function () {
var m = self.jingle2media($(this));
self.media.push(m);
});
// reconstruct msid-semantic -- apparently not necessary
/*
var msid = SDPUtil.parse_ssrc(this.raw);
if (msid.hasOwnProperty('mslabel')) {
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
}
*/
this.raw = this.session + this.media.join('');
};
// translate a jingle content element into an an SDP media part
SDP.prototype.jingle2media = function (content) {
var media = '',
desc = content.find('description'),
ssrc = desc.attr('ssrc'),
self = this,
tmp;
var sctp = content.find(
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
tmp = { media: desc.attr('media') };
tmp.port = '1';
if (content.attr('senders') == 'rejected') {
// estos hack to reject an m-line.
tmp.port = '0';
}
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
if (sctp.length)
tmp.proto = 'DTLS/SCTP';
else
tmp.proto = 'RTP/SAVPF';
} else {
tmp.proto = 'RTP/AVPF';
}
if (!sctp.length)
{
tmp.fmt = desc.find('payload-type').map(
function () { return this.getAttribute('id'); }).get();
media += SDPUtil.build_mline(tmp) + '\r\n';
}
else
{
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
media += 'a=sctpmap:' + sctp.attr('number') +
' ' + sctp.attr('protocol');
var streamCount = sctp.attr('streams');
if (streamCount)
media += ' ' + streamCount + '\r\n';
else
media += '\r\n';
}
media += 'c=IN IP4 0.0.0.0\r\n';
if (!sctp.length)
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
if (tmp.length) {
if (tmp.attr('ufrag')) {
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
}
if (tmp.attr('pwd')) {
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
}
tmp.find('>fingerprint').each(function () {
// FIXME: check namespace at some point
media += 'a=fingerprint:' + this.getAttribute('hash');
media += ' ' + $(this).text();
media += '\r\n';
//console.log("mline "+media);
if (this.getAttribute('setup')) {
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
}
});
}
switch (content.attr('senders')) {
case 'initiator':
media += 'a=sendonly\r\n';
break;
case 'responder':
media += 'a=recvonly\r\n';
break;
case 'none':
media += 'a=inactive\r\n';
break;
case 'both':
media += 'a=sendrecv\r\n';
break;
}
media += 'a=mid:' + content.attr('name') + '\r\n';
/*if (content.attr('name') == 'video') {
media += 'a=x-google-flag:conference' + '\r\n';
}*/
// <description><rtcp-mux/></description>
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
if (desc.find('rtcp-mux').length) {
media += 'a=rtcp-mux\r\n';
}
if (desc.find('encryption').length) {
desc.find('encryption>crypto').each(function () {
media += 'a=crypto:' + this.getAttribute('tag');
media += ' ' + this.getAttribute('crypto-suite');
media += ' ' + this.getAttribute('key-params');
if (this.getAttribute('session-params')) {
media += ' ' + this.getAttribute('session-params');
}
media += '\r\n';
});
}
desc.find('payload-type').each(function () {
media += SDPUtil.build_rtpmap(this) + '\r\n';
if ($(this).find('>parameter').length) {
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
media += '\r\n';
}
// xep-0293
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
});
// xep-0293
media += self.RtcpFbFromJingle(desc, '*');
// xep-0294
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
tmp.each(function () {
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
});
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
media += SDPUtil.candidateFromJingle(this);
});
// XEP-0339 handle ssrc-group attributes
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
var semantics = this.getAttribute('semantics');
var ssrcs = $(this).find('>source').map(function() {
return this.getAttribute('ssrc');
}).get();
if (ssrcs.length != 0) {
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
}
});
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
tmp.each(function () {
var ssrc = this.getAttribute('ssrc');
$(this).find('>parameter').each(function () {
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
if (this.getAttribute('value') && this.getAttribute('value').length)
media += ':' + this.getAttribute('value');
media += '\r\n';
});
});
if (tmp.length === 0) {
// fallback to proprietary mapping of a=ssrc lines
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
if (tmp.length) {
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
}
}
return media;
};
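For readers following the SDP manipulation above: the whole file pivots on splitting the blob at '\r\nm=' and then parsing each m-line positionally. The same parse in a short Python sketch (the sample line is illustrative only):

def parse_mline(line):
    # "m=audio 1 RTP/AVPF 111 103" -> media, port, proto, payload formats
    parts = line[2:].split(" ")
    return {
        "media": parts[0],
        "port": parts[1],
        "proto": parts[2],
        "fmt": parts[3:],
    }

print(parse_mline("m=audio 1 RTP/AVPF 111 103"))
# {'media': 'audio', 'port': '1', 'proto': 'RTP/AVPF', 'fmt': ['111', '103']}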

View File

@@ -0,0 +1,408 @@
/**
* Contains utility classes used in SDP class.
*
*/
/**
* Class holds a=ssrc lines and media type a=mid
* @param ssrc synchronization source identifier number(a=ssrc lines from SDP)
* @param type media type eg. "audio" or "video" (a=mid from SDP)
* @constructor
*/
function ChannelSsrc(ssrc, type) {
this.ssrc = ssrc;
this.type = type;
this.lines = [];
}
/**
* Class holds a=ssrc-group: lines
* @param semantics
* @param ssrcs
* @constructor
*/
function ChannelSsrcGroup(semantics, ssrcs, line) {
this.semantics = semantics;
this.ssrcs = ssrcs;
}
/**
* Helper class representing a media channel. It is a container for ChannelSsrc objects and holds the channel idx and media type.
* @param channelNumber channel idx in SDP media array.
* @param mediaType media type(a=mid)
* @constructor
*/
function MediaChannel(channelNumber, mediaType) {
/**
* SDP channel number
* @type {*}
*/
this.chNumber = channelNumber;
/**
* Channel media type(a=mid)
* @type {*}
*/
this.mediaType = mediaType;
/**
* The maps of ssrc numbers to ChannelSsrc objects.
*/
this.ssrcs = {};
/**
* The array of ChannelSsrcGroup objects.
* @type {Array}
*/
this.ssrcGroups = [];
}
SDPUtil = {
iceparams: function (mediadesc, sessiondesc) {
var data = null;
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
data = {
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
};
}
return data;
},
parse_iceufrag: function (line) {
return line.substring(12);
},
build_iceufrag: function (frag) {
return 'a=ice-ufrag:' + frag;
},
parse_icepwd: function (line) {
return line.substring(10);
},
build_icepwd: function (pwd) {
return 'a=ice-pwd:' + pwd;
},
parse_mid: function (line) {
return line.substring(6);
},
parse_mline: function (line) {
var parts = line.substring(2).split(' '),
data = {};
data.media = parts.shift();
data.port = parts.shift();
data.proto = parts.shift();
if (parts[parts.length - 1] === '') { // trailing whitespace
parts.pop();
}
data.fmt = parts;
return data;
},
build_mline: function (mline) {
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
},
parse_rtpmap: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.id = parts.shift();
parts = parts[0].split('/');
data.name = parts.shift();
data.clockrate = parts.shift();
data.channels = parts.length ? parts.shift() : '1';
return data;
},
/**
* Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
* @param line eg. "a=sctpmap:5000 webrtc-datachannel"
* @returns [SCTP port number, protocol, streams]
*/
parse_sctpmap: function (line)
{
var parts = line.substring(10).split(' ');
var sctpPort = parts[0];
var protocol = parts[1];
// Stream count is optional
var streamCount = parts.length > 2 ? parts[2] : null;
return [sctpPort, protocol, streamCount];// SCTP port
},
build_rtpmap: function (el) {
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
line += '/' + el.getAttribute('channels');
}
return line;
},
parse_crypto: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.tag = parts.shift();
data['crypto-suite'] = parts.shift();
data['key-params'] = parts.shift();
if (parts.length) {
data['session-params'] = parts.join(' ');
}
return data;
},
parse_fingerprint: function (line) { // RFC 4572
var parts = line.substring(14).split(' '),
data = {};
data.hash = parts.shift();
data.fingerprint = parts.shift();
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
return data;
},
parse_fmtp: function (line) {
var parts = line.split(' '),
i, key, value,
data = [];
parts.shift();
parts = parts.join(' ').split(';');
for (i = 0; i < parts.length; i++) {
key = parts[i].split('=')[0];
while (key.length && key[0] == ' ') {
key = key.substring(1);
}
value = parts[i].split('=')[1];
if (key && value) {
data.push({name: key, value: value});
} else if (key) {
// rfc 4733 (DTMF) style stuff
data.push({name: '', value: key});
}
}
return data;
},
parse_icecandidate: function (line) {
var candidate = {},
elems = line.split(' ');
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = 0; // default value, may be overwritten below
for (var i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
build_icecandidate: function (cand) {
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
line += ' ';
switch (cand.type) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
line += 'raddr';
line += ' ';
line += cand['rel-addr'];
line += ' ';
line += 'rport';
line += ' ';
line += cand['rel-port'];
line += ' ';
}
break;
}
if (cand.hasOwnProperty('tcptype')) {
line += 'tcptype';
line += ' ';
line += cand.tcptype;
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.hasOwnProperty('generation') ? cand.generation : '0';
return line;
},
parse_ssrc: function (desc) {
// proprietary mapping of a=ssrc lines
// TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
// and parse according to that
var lines = desc.split('\r\n'),
data = {};
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, 7) == 'a=ssrc:') {
var idx = lines[i].indexOf(' ');
data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
}
}
return data;
},
parse_rtcpfb: function (line) {
var parts = line.substr(10).split(' ');
var data = {};
data.pt = parts.shift();
data.type = parts.shift();
data.params = parts;
return data;
},
parse_extmap: function (line) {
var parts = line.substr(9).split(' ');
var data = {};
data.value = parts.shift();
if (data.value.indexOf('/') != -1) {
data.direction = data.value.substr(data.value.indexOf('/') + 1);
data.value = data.value.substr(0, data.value.indexOf('/'));
} else {
data.direction = 'both';
}
data.uri = parts.shift();
data.params = parts;
return data;
},
find_line: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n');
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle) {
return lines[i];
}
}
if (!sessionpart) {
return false;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
return lines[j];
}
}
return false;
},
find_lines: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n'),
needles = [];
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle)
needles.push(lines[i]);
}
if (needles.length || !sessionpart) {
return needles;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
needles.push(lines[j]);
}
}
return needles;
},
candidateToJingle: function (line) {
// a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
// <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
if (line.indexOf('candidate:') === 0) {
line = 'a=' + line;
} else if (line.substring(0, 12) != 'a=candidate:') {
console.log('candidateToJingle called with a line that is not a candidate line');
console.log(line);
return null;
}
if (line.substring(line.length - 2) == '\r\n') // chomp it
line = line.substring(0, line.length - 2);
var candidate = {},
elems = line.split(' '),
i;
if (elems[6] != 'typ') {
console.log('did not find typ in the right place');
console.log(line);
return null;
}
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = '0'; // default, may be overwritten below
for (i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
candidateFromJingle: function (cand) {
var line = 'a=candidate:';
line += cand.getAttribute('foundation');
line += ' ';
line += cand.getAttribute('component');
line += ' ';
line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
line += ' ';
line += cand.getAttribute('priority');
line += ' ';
line += cand.getAttribute('ip');
line += ' ';
line += cand.getAttribute('port');
line += ' ';
line += 'typ';
line += ' ' + cand.getAttribute('type');
line += ' ';
switch (cand.getAttribute('type')) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
line += 'raddr';
line += ' ';
line += cand.getAttribute('rel-addr');
line += ' ';
line += 'rport';
line += ' ';
line += cand.getAttribute('rel-port');
line += ' ';
}
break;
}
if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
line += 'tcptype';
line += ' ';
line += cand.getAttribute('tcptype');
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.getAttribute('generation') || '0';
return line + '\r\n';
}
};
exports.SDPUtil = SDPUtil;


@@ -0,0 +1,254 @@
/**
* Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
*
* This can be used with JS designed for browsers to improve reuse of code and
* allow the use of existing libraries.
*
* Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
*
* @todo SSL Support
* @author Dan DeFelippi <dan@driverdan.com>
* @license MIT
*/
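/*
 * Minimal usage sketch (hypothetical URL; mirrors the W3C-style API above):
 *
 *     var XMLHttpRequest = require("./XMLHttpRequest").XMLHttpRequest;
 *     var xhr = new XMLHttpRequest();
 *     xhr.onreadystatechange = function() {
 *         if (xhr.readyState === xhr.DONE) {
 *             console.log(xhr.status, xhr.responseText);
 *         }
 *     };
 *     xhr.open("GET", "http://localhost:8008/", true);
 *     xhr.send(null);
 */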
var Url = require("url")
,sys = require("util");
exports.XMLHttpRequest = function() {
/**
* Private variables
*/
var self = this;
var http = require('http');
var https = require('https');
// Holds http.js objects
var client;
var request;
var response;
// Request settings
var settings = {};
// Set some default headers
var defaultHeaders = {
"User-Agent": "node.js",
"Accept": "*/*",
};
var headers = defaultHeaders;
/**
* Constants
*/
this.UNSENT = 0;
this.OPENED = 1;
this.HEADERS_RECEIVED = 2;
this.LOADING = 3;
this.DONE = 4;
/**
* Public vars
*/
// Current state
this.readyState = this.UNSENT;
// default ready state change handler in case one is not set or is set late
this.onreadystatechange = function() {};
// Result & response
this.responseText = "";
this.responseXML = "";
this.status = null;
this.statusText = null;
/**
* Open the connection. Currently supports local server requests.
*
* @param string method Connection method (eg GET, POST)
* @param string url URL for the connection.
* @param boolean async Asynchronous connection. Default is true.
* @param string user Username for basic authentication (optional)
* @param string password Password for basic authentication (optional)
*/
this.open = function(method, url, async, user, password) {
settings = {
"method": method,
"url": url,
"async": async || null,
"user": user || null,
"password": password || null
};
this.abort();
setState(this.OPENED);
};
/**
* Sets a header for the request.
*
* @param string header Header name
* @param string value Header value
*/
this.setRequestHeader = function(header, value) {
headers[header] = value;
};
/**
* Gets a header from the server response.
*
* @param string header Name of header to get.
* @return string Text of the header or null if it doesn't exist.
*/
this.getResponseHeader = function(header) {
// Node lower-cases incoming header names; per the W3C behaviour this shim
// emulates, return just the header value rather than "name: value".
if (this.readyState > this.OPENED && response.headers[header.toLowerCase()]) {
return response.headers[header.toLowerCase()];
}
return null;
};
/**
* Gets all the response headers.
*
* @return string
*/
this.getAllResponseHeaders = function() {
if (this.readyState < this.HEADERS_RECEIVED) {
throw "INVALID_STATE_ERR: Headers have not been received.";
}
var result = "";
for (var i in response.headers) {
result += i + ": " + response.headers[i] + "\r\n";
}
return result.substr(0, result.length - 2);
};
/**
* Sends the request to the server.
*
* @param string data Optional data to send as request body.
*/
this.send = function(data) {
if (this.readyState != this.OPENED) {
throw "INVALID_STATE_ERR: connection must be opened before send() is called";
}
var ssl = false;
var url = Url.parse(settings.url);
// Determine the server
switch (url.protocol) {
case 'https:':
ssl = true;
// SSL & non-SSL both need host, no break here.
case 'http:':
var host = url.hostname;
break;
case undefined:
case '':
var host = "localhost";
break;
default:
throw "Protocol not supported.";
}
// Default to port 80. If accessing localhost on another port be sure
// to use http://localhost:port/path
var port = url.port || (ssl ? 443 : 80);
// Add query string if one is used
var uri = url.pathname + (url.search ? url.search : '');
// Set the Host header or the server may reject the request
this.setRequestHeader("Host", host);
// Set content length header
if (settings.method == "GET" || settings.method == "HEAD") {
data = null;
} else if (data) {
this.setRequestHeader("Content-Length", Buffer.byteLength(data));
if (!headers["Content-Type"]) {
this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
}
}
// Use the proper protocol
var doRequest = ssl ? https.request : http.request;
var options = {
host: host,
port: port,
path: uri,
method: settings.method,
headers: headers,
agent: false
};
var req = doRequest(options, function(res) {
response = res;
response.setEncoding("utf8");
setState(self.HEADERS_RECEIVED);
self.status = response.statusCode;
response.on('data', function(chunk) {
// Make sure there's some data
if (chunk) {
self.responseText += chunk;
}
setState(self.LOADING);
});
response.on('end', function() {
setState(self.DONE);
});
response.on('error', function(error) {
self.handleError(error);
});
}).on('error', function(error) {
self.handleError(error);
});
req.setHeader("Connection", "Close");
// Node 0.4 and later won't accept empty data. Make sure it's needed.
if (data) {
req.write(data);
}
req.end();
};
this.handleError = function(error) {
this.status = 503;
this.statusText = error;
this.responseText = error.stack;
setState(this.DONE);
};
/**
* Aborts a request.
*/
this.abort = function() {
headers = defaultHeaders;
this.readyState = this.UNSENT;
this.responseText = "";
this.responseXML = "";
};
/**
* Changes readyState and calls onreadystatechange.
*
* @param int state New state
*/
var setState = function(state) {
self.readyState = state;
self.onreadystatechange();
};
};


@@ -0,0 +1,83 @@
// This code was written by Tyler Akins and has been placed in the
// public domain. It would be nice if you left this header intact.
// Base64 code from Tyler Akins -- http://rumkin.com
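// Usage sketch (values checkable by hand against RFC 4648):
//   Base64.encode("hello") === "aGVsbG8="
//   Base64.decode("aGVsbG8=") === "hello"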
var Base64 = (function () {
var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var obj = {
/**
* Encodes a string in base64
* @param {String} input The string to encode in base64.
*/
encode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
keyStr.charAt(enc3) + keyStr.charAt(enc4);
} while (i < input.length);
return output;
},
/**
* Decodes a base64 string.
* @param {String} input The string to decode.
*/
decode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
// remove all characters that are not A-Z, a-z, 0-9, +, /, or =
input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
do {
enc1 = keyStr.indexOf(input.charAt(i++));
enc2 = keyStr.indexOf(input.charAt(i++));
enc3 = keyStr.indexOf(input.charAt(i++));
enc4 = keyStr.indexOf(input.charAt(i++));
chr1 = (enc1 << 2) | (enc2 >> 4);
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
chr3 = ((enc3 & 3) << 6) | enc4;
output = output + String.fromCharCode(chr1);
if (enc3 != 64) {
output = output + String.fromCharCode(chr2);
}
if (enc4 != 64) {
output = output + String.fromCharCode(chr3);
}
} while (i < input.length);
return output;
}
};
return obj;
})();
// Nodify
exports.Base64 = Base64;


@@ -0,0 +1,279 @@
/*
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/
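// Usage sketch: MD5.hexdigest("") === "d41d8cd98f00b204e9800998ecf8427e",
// the well-known digest of the empty string (see also the self-test in `test` below).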
var MD5 = (function () {
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
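// e.g. safe_add(0x7FFFFFFF, 1) === -2147483648: the sum wraps modulo 2^32
// and is reinterpreted as a signed 32-bit value.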
var safe_add = function (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
};
/*
* Bitwise rotate a 32-bit number to the left.
*/
var bit_rol = function (num, cnt) {
return (num << cnt) | (num >>> (32 - cnt));
};
/*
* Convert a string to an array of little-endian words
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
*/
var str2binl = function (str) {
var bin = [];
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
{
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
}
return bin;
};
/*
* Convert an array of little-endian words to a string
*/
var binl2str = function (bin) {
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
{
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
}
return str;
};
/*
* Convert an array of little-endian words to a hex string.
*/
var binl2hex = function (binarray) {
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
}
return str;
};
/*
* Convert an array of little-endian words to a base-64 string
*/
var binl2b64 = function (binarray) {
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
var triplet, j;
for(var i = 0; i < binarray.length * 4; i += 3)
{
triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
(((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
for(j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
}
}
return str;
};
/*
* These functions implement the four basic operations the algorithm uses.
*/
var md5_cmn = function (q, a, b, x, s, t) {
return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
};
var md5_ff = function (a, b, c, d, x, s, t) {
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
};
var md5_gg = function (a, b, c, d, x, s, t) {
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
};
var md5_hh = function (a, b, c, d, x, s, t) {
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
};
var md5_ii = function (a, b, c, d, x, s, t) {
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
};
/*
* Calculate the MD5 of an array of little-endian words, and a bit length
*/
var core_md5 = function (x, len) {
/* append padding */
x[len >> 5] |= 0x80 << ((len) % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var olda, oldb, oldc, oldd;
for (var i = 0; i < x.length; i += 16)
{
olda = a;
oldb = b;
oldc = c;
oldd = d;
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
}
return [a, b, c, d];
};
/*
* Calculate the HMAC-MD5 of a key and some data
*/
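// This is the standard RFC 2104 construction H((K ^ opad) || H((K ^ ipad) || data)),
// with the 0x36/0x5C pads applied across the 16-word (64-byte) key block.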
var core_hmac_md5 = function (key, data) {
var bkey = str2binl(key);
if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
var ipad = new Array(16), opad = new Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
return core_md5(opad.concat(hash), 512 + 128);
};
var obj = {
/*
* These are the functions you'll usually want to call.
* They take string arguments and return either hex or base-64 encoded
* strings.
*/
hexdigest: function (s) {
return binl2hex(core_md5(str2binl(s), s.length * chrsz));
},
b64digest: function (s) {
return binl2b64(core_md5(str2binl(s), s.length * chrsz));
},
hash: function (s) {
return binl2str(core_md5(str2binl(s), s.length * chrsz));
},
hmac_hexdigest: function (key, data) {
return binl2hex(core_hmac_md5(key, data));
},
hmac_b64digest: function (key, data) {
return binl2b64(core_hmac_md5(key, data));
},
hmac_hash: function (key, data) {
return binl2str(core_hmac_md5(key, data));
},
/*
* Perform a simple self-test to see if the VM is working
*/
test: function () {
return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
}
};
return obj;
})();
// Nodify
exports.MD5 = MD5;

(file diff suppressed because it is too large)


@@ -0,0 +1,48 @@
var strophe = require("./strophe/strophe.js").Strophe;
var Strophe = strophe.Strophe;
var $iq = strophe.$iq;
var $msg = strophe.$msg;
var $build = strophe.$build;
var $pres = strophe.$pres;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var stropheJingle = require("./strophe.jingle.sdp.js");
var input = '';
process.stdin.on('readable', function() {
var chunk = process.stdin.read();
if (chunk !== null) {
input += chunk;
}
});
process.stdin.on('end', function() {
if (process.argv[2] == '--jingle') {
var elem = $(input);
// app does:
// sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
//console.log(elem.find('>content'));
var sdp = new stropheJingle.SDP('');
sdp.fromJingle(elem);
console.log(sdp.raw);
} else if (process.argv[2] == '--sdp') {
var sdp = new stropheJingle.SDP(input);
var accept = $iq({to: '%(tojid)s',
type: 'set'})
.c('jingle', {xmlns: 'urn:xmpp:jingle:1',
//action: 'session-accept',
action: '%(action)s',
initiator: '%(initiator)s',
responder: '%(responder)s',
sid: '%(sid)s' });
sdp.toJingle(accept, 'responder');
console.log(Strophe.serialize(accept));
}
});


@@ -6,10 +6,8 @@ To use it, first install prometheus by following the instructions at
http://prometheus.io/
### for Prometheus v1
Add a new job to the main prometheus.conf file:
```yaml
job: {
name: "synapse"
@@ -17,31 +15,22 @@ Add a new job to the main prometheus.conf file:
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
}
}
```
### for Prometheus v2
Add a new job to the main prometheus.yml file:
```yaml
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
# when endpoint uses https:
scheme: "https"
static_configs:
- targets: ["my.server.here:port"]
```
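
Once metrics are enabled in Synapse, you can sanity-check the endpoint before
wiring up Prometheus (hypothetical host and port, matching the placeholders
above):

```sh
# Should print Prometheus-format metric lines (e.g. names starting "synapse_").
curl -s "http://my.server.here:port/_synapse/metrics" | head
```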
An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
- targets: ['SERVER.LOCATION:PORT']
To use `synapse.rules` add
```yaml
rule_files:
- "/PATH/TO/synapse-v2.rules"
```
rule_files:
- "/PATH/TO/synapse-v2.rules"
Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a


@@ -9,7 +9,7 @@
new PromConsole.Graph({
node: document.querySelector("#process_resource_utime"),
expr: "rate(process_cpu_seconds_total[2m]) * 100",
name: "[[job]]-[[index]]",
name: "[[job]]",
min: 0,
max: 100,
renderer: "line",
@@ -22,12 +22,12 @@ new PromConsole.Graph({
</script>
<h3>Memory</h3>
<div id="process_resident_memory_bytes"></div>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#process_resident_memory_bytes"),
expr: "process_resident_memory_bytes",
name: "[[job]]-[[index]]",
node: document.querySelector("#process_resource_maxrss"),
expr: "process_psutil_rss:max",
name: "Maxrss",
min: 0,
renderer: "line",
height: 150,
@@ -43,8 +43,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#process_fds"),
expr: "process_open_fds",
name: "[[job]]-[[index]]",
expr: "process_open_fds{job='synapse'}",
name: "FDs",
min: 0,
renderer: "line",
height: 150,
@@ -62,8 +62,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_total_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
name: "[[job]]-[[index]]",
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
name: "time",
max: 1,
min: 0,
renderer: "area",
@@ -80,8 +80,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_average_time"),
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
name: "[[job]]-[[index]]",
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
name: "time",
min: 0,
renderer: "line",
height: 150,
@@ -92,6 +92,22 @@ new PromConsole.Graph({
})
</script>
<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#reactor_pending_calls"),
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
name: "calls",
min: 0,
renderer: "line",
height: 150,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yTitle: "Pending Cals"
})
</script>
<h1>Storage</h1>
<h3>Queries</h3>
@@ -99,7 +115,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_query_time"),
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
expr: "rate(synapse_storage_query_time:count[2m])",
name: "[[verb]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -113,8 +129,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transaction_time"),
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
name: "[[job]]-[[index]] [[desc]]",
expr: "rate(synapse_storage_transaction_time:count[2m])",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -124,12 +140,12 @@ new PromConsole.Graph({
</script>
<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_sec"></div>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_transactions_time_sec"),
expr: "rate(synapse_storage_transaction_time_sum[2m])",
name: "[[job]]-[[index]] [[desc]]",
node: document.querySelector("#synapse_storage_transactions_time_msec"),
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
name: "[[desc]]",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -138,33 +154,34 @@ new PromConsole.Graph({
})
</script>
<h3>Average time waiting for database connection</h3>
<div id="synapse_storage_avg_waiting_time"></div>
<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_storage_avg_waiting_time"),
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
name: "[[job]]-[[index]]",
node: document.querySelector("#synapse_storage_schedule_time"),
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
name: "Total latency",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s",
yTitle: "Time"
yUnits: "s/s",
yTitle: "Usage"
})
</script>
<h3>Cache request rate</h3>
<div id="synapse_cache_request_rate"></div>
<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_request_rate"),
expr: "rate(synapse_util_caches_cache:total[2m])",
name: "[[job]]-[[index]] [[name]]",
node: document.querySelector("#synapse_cache_ratio"),
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
name: "[[name]]",
min: 0,
max: 100,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "rps",
yTitle: "Cache request rate"
yUnits: "%",
yTitle: "Percentage"
})
</script>
@@ -174,7 +191,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_cache_size"),
expr: "synapse_util_caches_cache:size",
name: "[[job]]-[[index]] [[name]]",
name: "[[name]]",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "",
@@ -189,8 +206,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet"),
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
expr: "rate(synapse_http_server_request_count:servlet[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -202,8 +219,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[job]]-[[index]] [[method]] [[servlet]]",
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -216,8 +233,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
name: "[[job]]-[[index]] [[servlet]]",
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
@@ -260,7 +277,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_ru_utime"),
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
@@ -275,7 +292,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
name: "[[job]]-[[index]] [[servlet]]",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/s",
@@ -289,8 +306,8 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
node: document.querySelector("#synapse_http_server_send_time_avg"),
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
name: "[[job]]-[[index]] [[servlet]]",
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
name: "[[servlet]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "s/req",
@@ -306,7 +323,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_client_sent"),
expr: "rate(synapse_federation_client_sent[2m])",
name: "[[job]]-[[index]] [[type]]",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -320,7 +337,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_federation_server_received"),
expr: "rate(synapse_federation_server_received[2m])",
name: "[[job]]-[[index]] [[type]]",
name: "[[type]]",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "req/s",
@@ -350,7 +367,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_listeners"),
expr: "synapse_notifier_listeners",
name: "[[job]]-[[index]]",
name: "listeners",
min: 0,
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -365,7 +382,7 @@ new PromConsole.Graph({
new PromConsole.Graph({
node: document.querySelector("#synapse_notifier_notified_events"),
expr: "rate(synapse_notifier_notified_events[2m])",
name: "[[job]]-[[index]]",
name: "events",
yAxisFormatter: PromConsole.NumberFormatter.humanize,
yHoverFormatter: PromConsole.NumberFormatter.humanize,
yUnits: "events/s",


@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0


@@ -1,20 +1,37 @@
groups:
- name: synapse
rules:
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'
- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
###
### Prometheus Console Only
### The following rules are only needed if you use the Prometheus Console
### in contrib/prometheus/consoles/synapse.html
###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus_total + 0'
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
@@ -23,11 +40,11 @@ groups:
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus_total + 0'
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus_total + 0'
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
@@ -41,34 +58,3 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
###
### End of 'Prometheus Console Only' rules block
###
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###


@@ -3,9 +3,8 @@ Purge history API examples
# `purge_history.sh`
A bash file that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all messages in a list of rooms up to a certain event. You can select a
A bash file that uses the [purge history API](/docs/admin_api/README.rst) to
purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
@@ -13,6 +12,5 @@ the script.
# `purge_remote_media.sh`
A bash file that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all old cached remote media.
A bash file that uses the [purge history API](/docs/admin_api/README.rst) to
purge all old cached remote media.
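
For reference, a minimal sketch of the call that `purge_history.sh` ends up
making (hypothetical base URL, token, room and event IDs; the endpoint path is
the one used inside the script):

```sh
API_URL="https://yourserver.tld/_matrix/client/r0"  # assumed base URL
TOKEN="YOUR_ADMIN_ACCESS_TOKEN"                     # hypothetical admin token
ROOM='!someroom:yourserver.tld'                     # hypothetical room ID
EVENT_ID='$someevent'                               # hypothetical event ID
curl -s --header "Authorization: Bearer $TOKEN" \
     -d '{"delete_local_events": true}' \
     -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID"
```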


@@ -1,7 +1,7 @@
#!/usr/bin/env bash
#!/bin/bash
# this script will use the api:
# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
#
# It will purge all messages in a list of rooms up to a certain event
@@ -84,9 +84,7 @@ AUTH="Authorization: Bearer $TOKEN"
###################################################################################################
# finally start pruning the room:
###################################################################################################
# this will really delete local events, so the messages in the room really
# disappear unless they are restored by remote federation. This is because
# we pass {"delete_local_events":true} to the curl invocation below.
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation
for ROOM in "${ROOMS_ARRAY[@]}"; do
echo "########################################### $(date) ################# "
@@ -106,7 +104,7 @@ for ROOM in "${ROOMS_ARRAY[@]}"; do
SLEEP=2
set -x
# call purge
OUT=$(curl --header "$AUTH" -s -d '{"delete_local_events":true}' POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
OUT=$(curl --header "$AUTH" -s -d $POSTDATA POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
if [ "$PURGE_ID" == "" ]; then
# probably the history purge is already in progress for $ROOM


@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
DOMAIN=yourserver.tld
# add this user as admin in your home server:
@@ -51,4 +51,4 @@ TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"

contrib/scripts/kick_users.py (new executable file, 93 lines)

@@ -0,0 +1,93 @@
#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib
def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])
return template
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{
"$HS": hs,
"$ROOM": room_id,
"$TOKEN": access_token
}
)
print "Getting room state => %s" % room_state_url
res = requests.get(room_state_url)
print "HTTP %s" % res.status_code
state_events = res.json()
if "error" in state_events:
print "FATAL"
print state_events
return
kick_list = []
room_name = room_id
for event in state_events:
if not event["type"] == "m.room.member":
if event["type"] == "m.room.name":
room_name = event["content"].get("name")
continue
if not event["content"].get("membership") == "join":
continue
if event["state_key"].startswith(user_id_prefix):
kick_list.append(event["state_key"])
if len(kick_list) == 0:
print "No user IDs match the prefix '%s'" % user_id_prefix
return
print "The following user IDs will be kicked from %s" % room_name
for uid in kick_list:
print uid
doit = raw_input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == 'y':
print "Kicking members..."
# encode them all
kick_list = [urllib.quote(uid) for uid in kick_list]
for uid in kick_list:
kick_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
{
"$HS": hs,
"$UID": uid,
"$ROOM": room_id,
"$TOKEN": access_token
}
)
kick_body = {
"membership": "leave",
"reason": why
}
print "Kicking %s" % uid
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print "ERROR: HTTP %s" % res.status_code
if res.json().get("error"):
print "ERROR: JSON %s" % res.json()
if __name__ == "__main__":
parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
parser.add_argument("-t","--token",help="Your access_token")
parser.add_argument("-r","--room",help="The room ID to kick members in")
parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
args = parser.parse_args()
if not args.room or not args.token or not args.user_id or not args.homeserver:
parser.print_help()
sys.exit(1)
else:
main(args.homeserver, args.room, args.token, args.user_id, args.why)


@@ -1,57 +0,0 @@
name: matrix-synapse
base: core18
version: git
summary: Reference Matrix homeserver
description: |
Synapse is the reference Matrix homeserver.
Matrix is a federated and decentralised instant messaging and VoIP system.
grade: stable
confinement: strict
apps:
matrix-synapse:
command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
stop-command: synctl -c $SNAP_COMMON stop
plugs: [network-bind, network]
daemon: simple
hash-password:
command: hash_password
generate-config:
command: generate_config
generate-signing-key:
command: generate_signing_key
register-new-matrix-user:
command: register_new_matrix_user
plugs: [network]
synctl:
command: synctl
parts:
matrix-synapse:
source: .
plugin: python
python-version: python3
python-packages:
- '.[all]'
- pip
- setuptools
- setuptools-scm
- wheel
build-packages:
- libffi-dev
- libturbojpeg0-dev
- libssl-dev
- libxslt1-dev
- libpq-dev
- zlib1g-dev
stage-packages:
- libasn1-8-heimdal
- libgssapi3-heimdal
- libhcrypto4-heimdal
- libheimbase1-heimdal
- libheimntlm0-heimdal
- libhx509-5-heimdal
- libkrb5-26-heimdal
- libldap-2.4-2
- libpq5
- libsasl2-2


@@ -1,3 +0,0 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).


@@ -1,18 +0,0 @@
# Setup Synapse with Systemd
This is a setup for managing synapse with a user-contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to your installation, in accordance with the
[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).
## Setup
1. Under the service section, ensure the `User` variable matches which user
you installed synapse under and wish to run it as.
2. Under the service section, ensure the `WorkingDirectory` variable matches
where you have installed synapse.
3. Under the service section, ensure the `ExecStart` variable matches the
appropriate locations of your installation.
4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`


@@ -8,7 +8,7 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:


@@ -4,36 +4,28 @@
# systemctl enable matrix-synapse
# systemctl start matrix-synapse
#
# This assumes that Synapse has been installed by a user named
# synapse.
#
# This assumes that Synapse has been installed in a virtualenv in
# the user's home directory: `/home/synapse/synapse/env`.
# /opt/synapse/env.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
[Unit]
Description=Synapse Matrix homeserver
# If you are using postgresql to persist data, uncomment this line to make sure
# synapse starts after the postgresql service.
# After=postgresql.service
[Service]
Type=notify
NotifyAccess=main
ExecReload=/bin/kill -HUP $MAINPID
Type=simple
Restart=on-abort
User=synapse
Group=nogroup
WorkingDirectory=/home/synapse/synapse
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
SyslogIdentifier=matrix-synapse
WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0
[Install]
WantedBy=multi-user.target


@@ -1,71 +0,0 @@
[Service]
# The following directives give the synapse service R/W access to:
# - /run/matrix-synapse
# - /var/lib/matrix-synapse
# - /var/log/matrix-synapse
RuntimeDirectory=matrix-synapse
StateDirectory=matrix-synapse
LogsDirectory=matrix-synapse
######################
## Security Sandbox ##
######################
# Make sure that the service has its own unshared tmpfs at /tmp and that it
# cannot see or change any real devices
PrivateTmp=true
PrivateDevices=true
# We give no capabilities to a service by default
CapabilityBoundingSet=
AmbientCapabilities=
# Protect the following from modification:
# - The entire filesystem
# - sysctl settings and loaded kernel modules
# - No modifications allowed to Control Groups
# - Hostname
# - System Clock
ProtectSystem=strict
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectClock=true
ProtectHostname=true
# Prevent access to the following:
# - /home directory
# - Kernel logs
ProtectHome=tmpfs
ProtectKernelLogs=true
# Make sure that the process can only see PIDs and process details of itself,
# and the second option disables seeing details of things like system load and
# I/O etc
ProtectProc=invisible
ProcSubset=pid
# While not needed, we set these options explicitly
# - This process has been given access to the host network
# - It can also communicate with any IP Address
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
IPAddressAllow=any
# Restrict system calls to a sane bunch
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources @obsolete
# Misc restrictions
# - Since the process is a python process it needs to be able to write and
# execute memory regions, so we set MemoryDenyWriteExecute to false
RestrictSUIDSGID=true
RemoveIPC=true
NoNewPrivileges=true
RestrictRealtime=true
RestrictNamespaces=true
LockPersonality=true
PrivateUsers=true
MemoryDenyWriteExecute=false


@@ -1,33 +0,0 @@
# Creating multiple generic workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:
```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 808$i
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs.


@@ -1,145 +0,0 @@
# Creating multiple stream writers with a bash script
This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers.
Stream writers require both replication and HTTP listeners.
It also prints out the example lines for Synapse main configuration file.
Remember to route each stream's endpoints directly to its associated worker.
If you run the script as-is, it will create workers whose replication listeners start at port 8034 and whose regular HTTP listeners start at port 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
# Start with these replication and http ports.
# The script loop starts with the exact port and then increments it by one.
REP_START_PORT=8034
HTTP_START_PORT=8044
# Stream writer workers to generate. Feel free to add or remove them as you wish.
# Event persister ("events") isn't included here as it does not require its
# own HTTP listener.
STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" )
NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]})
i=0
while [ $i -lt "$NUM_WRITERS" ]
do
cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml
worker_app: synapse.app.generic_worker
worker_name: ${STREAM_WRITERS[$i]}_stream_writer
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: $(expr $REP_START_PORT + $i)
resources:
- names: [replication]
- type: http
port: $(expr $HTTP_START_PORT + $i)
resources:
- names: [client]
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
host: 127.0.0.1
port: $(expr $REP_START_PORT + $i)
"
HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer
"
((i++))
done
cat << EXAMPLECONFIG
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information.
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
$HOMESERVER_YAML_INSTANCE_MAP
stream_writers:
$HOMESERVER_YAML_STREAM_WRITERS
EXAMPLECONFIG
```
Copy the code above and save it to a file, for example ```create_stream_writers.sh```.
Make the script executable by running ```chmod +x create_stream_writers.sh```.
## Run the script to create workers and print out a sample configuration
Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```.
```console
$ ./create_stream_writers.sh
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
presence_stream_writer:
host: 127.0.0.1
port: 8034
typing_stream_writer:
host: 127.0.0.1
port: 8035
receipts_stream_writer:
host: 127.0.0.1
port: 8036
to_device_stream_writer:
host: 127.0.0.1
port: 8037
account_data_stream_writer:
host: 127.0.0.1
port: 8038
stream_writers:
presence: presence_stream_writer
typing: typing_stream_writer
receipts: receipts_stream_writer
to_device: to_device_stream_writer
account_data: account_data_stream_writer
```
Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file.
## Write directly to Synapse configuration file
You could also write the output directly to homeserver main configuration file. **This, however, is not recommended** as even a small typo (such as replacing >> with >) can erase the entire ```homeserver.yaml```.
If you do this, back up your original configuration file first:
```console
# Back up homeserver.yaml first
cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak
# Create workers and write output to your homeserver.yaml
./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml
```


@@ -15,7 +15,7 @@ export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=$(readlink -e /usr/bin/python3)
SNAKE=`readlink -e /usr/bin/python3`
# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -24,73 +24,43 @@ SNAKE=$(readlink -e /usr/bin/python3)
# TODO: add similar things for non-amd64, or figure out a more generic way to
# do this.
case $(dpkg-architecture -q DEB_HOST_ARCH) in
case `dpkg-architecture -q DEB_HOST_ARCH` in
amd64)
export CFLAGS=-march=x86-64
;;
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \
--extras systemd \
-o exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
--setuptools \
--python "$SNAKE" \
--upgrade-pip \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
--extras="all"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
case "$DEB_BUILD_OPTIONS" in
*nocheck*)
# Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
;;
# we copy the tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=`mktemp -d`
trap "rm -r $tmpdir" EXIT
*)
# Copy tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
tmpdir=$(mktemp -d)
trap 'rm -r $tmpdir' EXIT
cp -r tests "$tmpdir"
cp -r tests "$tmpdir"
# To avoid pulling in the unbuilt Synapse in the local directory
pushd /
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
popd
;;
esac
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
@@ -115,25 +85,7 @@ esac
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
PYPKG=$(basename "$SNAKE")
PYPKG=`basename $SNAKE`
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
# add a couple of triggers. This is needed so that dh-virtualenv can rebuild
# the venv when the system python changes (see
# https://dh-virtualenv.readthedocs.io/en/latest/tutorial.html#step-2-set-up-packaging-for-your-project)
#
# we do it here rather than the more conventional way of just adding it to
# debian/matrix-synapse-py3.triggers, because we need to add a trigger on the
# right version of python.
cat >>"debian/.debhelper/generated/matrix-synapse-py3/triggers" <<EOF
# triggers for dh-virtualenv
interest-noawait $SNAKE
interest dh-virtualenv-interpreter-update
EOF

debian/changelog (vendored; 1251 lines changed; diff suppressed because it is too large)

Some files were not shown because too many files have changed in this diff.