mirror of
https://github.com/element-hq/synapse.git
synced 2025-12-07 01:20:16 +00:00
Compare commits
1 Commits
v1.39.0
...
erikj/test
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa2fe082ae |
@@ -3,7 +3,7 @@
|
|||||||
# CI's Docker setup at the point where this file is considered.
|
# CI's Docker setup at the point where this file is considered.
|
||||||
server_name: "localhost:8800"
|
server_name: "localhost:8800"
|
||||||
|
|
||||||
signing_key_path: ".buildkite/test.signing.key"
|
signing_key_path: "/src/.buildkite/test.signing.key"
|
||||||
|
|
||||||
report_stats: false
|
report_stats: false
|
||||||
|
|
||||||
@@ -16,4 +16,6 @@ database:
|
|||||||
database: synapse
|
database: synapse
|
||||||
|
|
||||||
# Suppress the key server warning.
|
# Suppress the key server warning.
|
||||||
trusted_key_servers: []
|
trusted_key_servers:
|
||||||
|
- server_name: "matrix.org"
|
||||||
|
suppress_key_server_warning: true
|
||||||
|
|||||||
37
.buildkite/scripts/create_postgres_db.py
Executable file
37
.buildkite/scripts/create_postgres_db.py
Executable file
@@ -0,0 +1,37 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
|
||||||
|
logger = logging.getLogger("create_postgres_db")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Create a PostgresEngine.
|
||||||
|
db_engine = create_engine({"name": "psycopg2", "args": {}})
|
||||||
|
|
||||||
|
# Connect to postgres to create the base database.
|
||||||
|
# We use "postgres" as a database because it's bound to exist and the "synapse" one
|
||||||
|
# doesn't exist yet.
|
||||||
|
db_conn = db_engine.module.connect(
|
||||||
|
user="postgres", host="postgres", password="postgres", dbname="postgres"
|
||||||
|
)
|
||||||
|
db_conn.autocommit = True
|
||||||
|
cur = db_conn.cursor()
|
||||||
|
cur.execute("CREATE DATABASE synapse;")
|
||||||
|
cur.close()
|
||||||
|
db_conn.close()
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import psycopg2
|
|
||||||
|
|
||||||
# a very simple replacment for `psql`, to make up for the lack of the postgres client
|
|
||||||
# libraries in the synapse docker image.
|
|
||||||
|
|
||||||
# We use "postgres" as a database because it's bound to exist and the "synapse" one
|
|
||||||
# doesn't exist yet.
|
|
||||||
db_conn = psycopg2.connect(
|
|
||||||
user="postgres", host="postgres", password="postgres", dbname="postgres"
|
|
||||||
)
|
|
||||||
db_conn.autocommit = True
|
|
||||||
cur = db_conn.cursor()
|
|
||||||
for c in sys.argv[1:]:
|
|
||||||
cur.execute(c)
|
|
||||||
11
.buildkite/scripts/setup_old_deps.sh
Executable file
11
.buildkite/scripts/setup_old_deps.sh
Executable file
@@ -0,0 +1,11 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# this script is run by buildkite in a plain `xenial` container; it installs the
|
||||||
|
# minimal requirements for tox and hands over to the py35-old tox environment.
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
|
||||||
|
|
||||||
|
export LANG="C.UTF-8"
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# this script is run by buildkite in a plain `bionic` container; it installs the
|
|
||||||
# minimal requirements for tox and hands over to the py3-old tox environment.
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
|
|
||||||
|
|
||||||
export LANG="C.UTF-8"
|
|
||||||
|
|
||||||
# Prevent virtualenv from auto-updating pip to an incompatible version
|
|
||||||
export VIRTUALENV_NO_DOWNLOAD=1
|
|
||||||
|
|
||||||
exec tox -e py3-old,combine
|
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# Test script for 'synapse_port_db'.
|
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
|
||||||
# - sets up synapse and deps
|
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
|
||||||
# - runs the port script on a prepopulated test sqlite db
|
# driver), update the schema of the test SQLite database and run background updates on it,
|
||||||
# - also runs it against an new sqlite db
|
# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to
|
||||||
|
# test porting the SQLite database to the PostgreSQL database (with coverage).
|
||||||
|
|
||||||
set -xe
|
set -xe
|
||||||
cd `dirname $0`/../..
|
cd `dirname $0`/../..
|
||||||
@@ -22,36 +22,15 @@ echo "--- Generate the signing key"
|
|||||||
# Generate the server's signing key.
|
# Generate the server's signing key.
|
||||||
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
|
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
|
||||||
|
|
||||||
echo "--- Prepare test database"
|
echo "--- Prepare the databases"
|
||||||
|
|
||||||
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
|
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
|
||||||
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
|
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
|
||||||
|
|
||||||
# Create the PostgreSQL database.
|
# Create the PostgreSQL database.
|
||||||
./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
|
./.buildkite/scripts/create_postgres_db.py
|
||||||
|
|
||||||
echo "+++ Run synapse_port_db against test database"
|
echo "+++ Run synapse_port_db"
|
||||||
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
|
|
||||||
|
# Run the script
|
||||||
# We should be able to run twice against the same database.
|
|
||||||
echo "+++ Run synapse_port_db a second time"
|
|
||||||
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
|
|
||||||
|
|
||||||
#####
|
|
||||||
|
|
||||||
# Now do the same again, on an empty database.
|
|
||||||
|
|
||||||
echo "--- Prepare empty SQLite database"
|
|
||||||
|
|
||||||
# we do this by deleting the sqlite db, and then doing the same again.
|
|
||||||
rm .buildkite/test_db.db
|
|
||||||
|
|
||||||
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
|
|
||||||
|
|
||||||
# re-create the PostgreSQL database.
|
|
||||||
./.buildkite/scripts/postgres_exec.py \
|
|
||||||
"DROP DATABASE synapse" \
|
|
||||||
"CREATE DATABASE synapse"
|
|
||||||
|
|
||||||
echo "+++ Run synapse_port_db against empty database"
|
|
||||||
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
|
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
# schema and run background updates on it.
|
# schema and run background updates on it.
|
||||||
server_name: "localhost:8800"
|
server_name: "localhost:8800"
|
||||||
|
|
||||||
signing_key_path: ".buildkite/test.signing.key"
|
signing_key_path: "/src/.buildkite/test.signing.key"
|
||||||
|
|
||||||
report_stats: false
|
report_stats: false
|
||||||
|
|
||||||
@@ -13,4 +13,6 @@ database:
|
|||||||
database: ".buildkite/test_db.db"
|
database: ".buildkite/test_db.db"
|
||||||
|
|
||||||
# Suppress the key server warning.
|
# Suppress the key server warning.
|
||||||
trusted_key_servers: []
|
trusted_key_servers:
|
||||||
|
- server_name: "matrix.org"
|
||||||
|
suppress_key_server_warning: true
|
||||||
|
|||||||
78
.circleci/config.yml
Normal file
78
.circleci/config.yml
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
version: 2.1
|
||||||
|
jobs:
|
||||||
|
dockerhubuploadrelease:
|
||||||
|
docker:
|
||||||
|
- image: docker:git
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- docker_prepare
|
||||||
|
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
|
||||||
|
# for release builds, we want to get the amd64 image out asap, so first
|
||||||
|
# we do an amd64-only build, before following up with a multiarch build.
|
||||||
|
- docker_build:
|
||||||
|
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
|
||||||
|
platforms: linux/amd64
|
||||||
|
- docker_build:
|
||||||
|
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
|
||||||
|
platforms: linux/amd64,linux/arm/v7,linux/arm64
|
||||||
|
|
||||||
|
dockerhubuploadlatest:
|
||||||
|
docker:
|
||||||
|
- image: docker:git
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- docker_prepare
|
||||||
|
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
|
||||||
|
# for `latest`, we don't want the arm images to disappear, so don't update the tag
|
||||||
|
# until all of the platforms are built.
|
||||||
|
- docker_build:
|
||||||
|
tag: -t matrixdotorg/synapse:latest
|
||||||
|
platforms: linux/amd64,linux/arm/v7,linux/arm64
|
||||||
|
|
||||||
|
workflows:
|
||||||
|
build:
|
||||||
|
jobs:
|
||||||
|
- dockerhubuploadrelease:
|
||||||
|
filters:
|
||||||
|
tags:
|
||||||
|
only: /v[0-9].[0-9]+.[0-9]+.*/
|
||||||
|
branches:
|
||||||
|
ignore: /.*/
|
||||||
|
- dockerhubuploadlatest:
|
||||||
|
filters:
|
||||||
|
branches:
|
||||||
|
only: master
|
||||||
|
|
||||||
|
commands:
|
||||||
|
docker_prepare:
|
||||||
|
description: Sets up a remote docker server, downloads the buildx cli plugin, and enables multiarch images
|
||||||
|
parameters:
|
||||||
|
buildx_version:
|
||||||
|
type: string
|
||||||
|
default: "v0.4.1"
|
||||||
|
steps:
|
||||||
|
- setup_remote_docker:
|
||||||
|
# 19.03.13 was the most recent available on circleci at the time of
|
||||||
|
# writing.
|
||||||
|
version: 19.03.13
|
||||||
|
- run: apk add --no-cache curl
|
||||||
|
- run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
|
||||||
|
- run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
|
||||||
|
- run: chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||||
|
# install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job
|
||||||
|
- run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
|
||||||
|
# create a context named `builder` for the builds
|
||||||
|
- run: docker context create builder
|
||||||
|
# create a buildx builder using the new context, and set it as the default
|
||||||
|
- run: docker buildx create builder --use
|
||||||
|
|
||||||
|
docker_build:
|
||||||
|
description: Builds and pushed images to dockerhub using buildx
|
||||||
|
parameters:
|
||||||
|
platforms:
|
||||||
|
type: string
|
||||||
|
default: linux/amd64
|
||||||
|
tag:
|
||||||
|
type: string
|
||||||
|
steps:
|
||||||
|
- run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
# Black reformatting (#5482).
|
|
||||||
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
|
|
||||||
|
|
||||||
# Target Python 3.5 with black (#8664).
|
|
||||||
aff1eb7c671b0a3813407321d2702ec46c71fa56
|
|
||||||
|
|
||||||
# Update black to 20.8b1 (#9381).
|
|
||||||
0a00b7ff14890987f09112a2ae696c61001e6cf1
|
|
||||||
72
.github/workflows/docker.yml
vendored
72
.github/workflows/docker.yml
vendored
@@ -1,72 +0,0 @@
|
|||||||
# GitHub actions workflow which builds and publishes the docker images.
|
|
||||||
|
|
||||||
name: Build docker images
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags: ["v*"]
|
|
||||||
branches: [ master, main ]
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Set up QEMU
|
|
||||||
id: qemu
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
with:
|
|
||||||
platforms: arm64
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
id: buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- name: Inspect builder
|
|
||||||
run: docker buildx inspect
|
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
|
||||||
uses: docker/login-action@v1
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Calculate docker image tag
|
|
||||||
id: set-tag
|
|
||||||
run: |
|
|
||||||
case "${GITHUB_REF}" in
|
|
||||||
refs/heads/master|refs/heads/main)
|
|
||||||
tag=latest
|
|
||||||
;;
|
|
||||||
refs/tags/*)
|
|
||||||
tag=${GITHUB_REF#refs/tags/}
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
tag=${GITHUB_SHA}
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
echo "::set-output name=tag::$tag"
|
|
||||||
|
|
||||||
# for release builds, we want to get the amd64 image out asap, so first
|
|
||||||
# we do an amd64-only build, before following up with a multiarch build.
|
|
||||||
- name: Build and push amd64
|
|
||||||
uses: docker/build-push-action@v2
|
|
||||||
if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
labels: "gitsha1=${{ github.sha }}"
|
|
||||||
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
|
|
||||||
file: "docker/Dockerfile"
|
|
||||||
platforms: linux/amd64
|
|
||||||
|
|
||||||
- name: Build and push all platforms
|
|
||||||
uses: docker/build-push-action@v2
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
labels: "gitsha1=${{ github.sha }}"
|
|
||||||
tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
|
|
||||||
file: "docker/Dockerfile"
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
66
.github/workflows/docs.yaml
vendored
66
.github/workflows/docs.yaml
vendored
@@ -1,66 +0,0 @@
|
|||||||
name: Deploy the documentation
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
# For bleeding-edge documentation
|
|
||||||
- develop
|
|
||||||
# For documentation specific to a release
|
|
||||||
- 'release-v*'
|
|
||||||
# stable docs
|
|
||||||
- master
|
|
||||||
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
pages:
|
|
||||||
name: GitHub Pages
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- name: Setup mdbook
|
|
||||||
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
|
|
||||||
with:
|
|
||||||
mdbook-version: '0.4.9'
|
|
||||||
|
|
||||||
- name: Build the documentation
|
|
||||||
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
|
|
||||||
# However, we're using docs/README.md for other purposes and need to pick a new page
|
|
||||||
# as the default. Let's opt for the welcome page instead.
|
|
||||||
run: |
|
|
||||||
mdbook build
|
|
||||||
cp book/welcome_and_overview.html book/index.html
|
|
||||||
|
|
||||||
# Figure out the target directory.
|
|
||||||
#
|
|
||||||
# The target directory depends on the name of the branch
|
|
||||||
#
|
|
||||||
- name: Get the target directory name
|
|
||||||
id: vars
|
|
||||||
run: |
|
|
||||||
# first strip the 'refs/heads/' prefix with some shell foo
|
|
||||||
branch="${GITHUB_REF#refs/heads/}"
|
|
||||||
|
|
||||||
case $branch in
|
|
||||||
release-*)
|
|
||||||
# strip 'release-' from the name for release branches.
|
|
||||||
branch="${branch#release-}"
|
|
||||||
;;
|
|
||||||
master)
|
|
||||||
# deploy to "latest" for the master branch.
|
|
||||||
branch="latest"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# finally, set the 'branch-version' var.
|
|
||||||
echo "::set-output name=branch-version::$branch"
|
|
||||||
|
|
||||||
# Deploy to the target directory.
|
|
||||||
- name: Deploy to gh pages
|
|
||||||
uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
|
|
||||||
with:
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
keep_files: true
|
|
||||||
publish_dir: ./book
|
|
||||||
destination_dir: ./${{ steps.vars.outputs.branch-version }}
|
|
||||||
95
.github/workflows/release-artifacts.yml
vendored
95
.github/workflows/release-artifacts.yml
vendored
@@ -1,95 +0,0 @@
|
|||||||
# GitHub actions workflow which builds the release artifacts.
|
|
||||||
|
|
||||||
name: Build release artifacts
|
|
||||||
|
|
||||||
on:
|
|
||||||
# we build on PRs and develop to (hopefully) get early warning
|
|
||||||
# of things breaking (but only build one set of debs)
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches: ["develop"]
|
|
||||||
|
|
||||||
# we do the full build on tags.
|
|
||||||
tags: ["v*"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
get-distros:
|
|
||||||
name: "Calculate list of debian distros"
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
- id: set-distros
|
|
||||||
run: |
|
|
||||||
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
|
|
||||||
dists='["debian:sid"]'
|
|
||||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
|
||||||
dists=$(scripts-dev/build_debian_packages --show-dists-json)
|
|
||||||
fi
|
|
||||||
echo "::set-output name=distros::$dists"
|
|
||||||
# map the step outputs to job outputs
|
|
||||||
outputs:
|
|
||||||
distros: ${{ steps.set-distros.outputs.distros }}
|
|
||||||
|
|
||||||
# now build the packages with a matrix build.
|
|
||||||
build-debs:
|
|
||||||
needs: get-distros
|
|
||||||
name: "Build .deb packages"
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
path: src
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
- run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
|
|
||||||
- uses: actions/upload-artifact@v2
|
|
||||||
with:
|
|
||||||
name: debs
|
|
||||||
path: debs/*
|
|
||||||
|
|
||||||
build-sdist:
|
|
||||||
name: "Build pypi distribution files"
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
- run: pip install wheel
|
|
||||||
- run: |
|
|
||||||
python setup.py sdist bdist_wheel
|
|
||||||
- uses: actions/upload-artifact@v2
|
|
||||||
with:
|
|
||||||
name: python-dist
|
|
||||||
path: dist/*
|
|
||||||
|
|
||||||
# if it's a tag, create a release and attach the artifacts to it
|
|
||||||
attach-assets:
|
|
||||||
name: "Attach assets to release"
|
|
||||||
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
|
|
||||||
needs:
|
|
||||||
- build-debs
|
|
||||||
- build-sdist
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Download all workflow run artifacts
|
|
||||||
uses: actions/download-artifact@v2
|
|
||||||
- name: Build a tarball for the debs
|
|
||||||
run: tar -cvJf debs.tar.xz debs
|
|
||||||
- name: Attach to release
|
|
||||||
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
files: |
|
|
||||||
python-dist/*
|
|
||||||
debs.tar.xz
|
|
||||||
# if it's not already published, keep the release as a draft.
|
|
||||||
draft: true
|
|
||||||
# mark it as a prerelease if the tag contains 'rc'.
|
|
||||||
prerelease: ${{ contains(github.ref, 'rc') }}
|
|
||||||
358
.github/workflows/tests.yml
vendored
358
.github/workflows/tests.yml
vendored
@@ -1,358 +0,0 @@
|
|||||||
name: Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["develop", "release-*"]
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
toxenv:
|
|
||||||
- "check-sampleconfig"
|
|
||||||
- "check_codestyle"
|
|
||||||
- "check_isort"
|
|
||||||
- "mypy"
|
|
||||||
- "packaging"
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
- run: pip install tox
|
|
||||||
- run: tox -e ${{ matrix.toxenv }}
|
|
||||||
|
|
||||||
lint-crlf:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: Check line endings
|
|
||||||
run: scripts-dev/check_line_terminators.sh
|
|
||||||
|
|
||||||
lint-newsfile:
|
|
||||||
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
# Note: This and the script can be simplified once we drop Buildkite. See:
|
|
||||||
# https://github.com/actions/checkout/issues/266#issuecomment-638346893
|
|
||||||
# https://github.com/actions/checkout/issues/416
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
ref: ${{ github.event.pull_request.head.sha }}
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
- run: pip install tox
|
|
||||||
- name: Patch Buildkite-specific test script
|
|
||||||
run: |
|
|
||||||
sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
|
|
||||||
scripts-dev/check-newsfragment
|
|
||||||
- run: scripts-dev/check-newsfragment
|
|
||||||
|
|
||||||
lint-sdist:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: "3.x"
|
|
||||||
- run: pip install wheel
|
|
||||||
- run: python setup.py sdist bdist_wheel
|
|
||||||
- uses: actions/upload-artifact@v2
|
|
||||||
with:
|
|
||||||
name: Python Distributions
|
|
||||||
path: dist/*
|
|
||||||
|
|
||||||
# Dummy step to gate other tests on without repeating the whole list
|
|
||||||
linting-done:
|
|
||||||
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
|
|
||||||
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- run: "true"
|
|
||||||
|
|
||||||
trial:
|
|
||||||
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
python-version: ["3.6", "3.7", "3.8", "3.9"]
|
|
||||||
database: ["sqlite"]
|
|
||||||
include:
|
|
||||||
# Newest Python without optional deps
|
|
||||||
- python-version: "3.9"
|
|
||||||
toxenv: "py-noextras,combine"
|
|
||||||
|
|
||||||
# Oldest Python with PostgreSQL
|
|
||||||
- python-version: "3.6"
|
|
||||||
database: "postgres"
|
|
||||||
postgres-version: "9.6"
|
|
||||||
|
|
||||||
# Newest Python with PostgreSQL
|
|
||||||
- python-version: "3.9"
|
|
||||||
database: "postgres"
|
|
||||||
postgres-version: "13"
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- run: sudo apt-get -qq install xmlsec1
|
|
||||||
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
|
|
||||||
if: ${{ matrix.postgres-version }}
|
|
||||||
run: |
|
|
||||||
docker run -d -p 5432:5432 \
|
|
||||||
-e POSTGRES_PASSWORD=postgres \
|
|
||||||
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
|
|
||||||
postgres:${{ matrix.postgres-version }}
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
- run: pip install tox
|
|
||||||
- name: Await PostgreSQL
|
|
||||||
if: ${{ matrix.postgres-version }}
|
|
||||||
timeout-minutes: 2
|
|
||||||
run: until pg_isready -h localhost; do sleep 1; done
|
|
||||||
- run: tox -e py,combine
|
|
||||||
env:
|
|
||||||
TRIAL_FLAGS: "--jobs=2"
|
|
||||||
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
|
|
||||||
SYNAPSE_POSTGRES_HOST: localhost
|
|
||||||
SYNAPSE_POSTGRES_USER: postgres
|
|
||||||
SYNAPSE_POSTGRES_PASSWORD: postgres
|
|
||||||
- name: Dump logs
|
|
||||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
|
||||||
# This keeps logs colocated with failing jobs
|
|
||||||
# It also ignores find's exit code; this is a best effort affair
|
|
||||||
run: >-
|
|
||||||
find _trial_temp -name '*.log'
|
|
||||||
-exec echo "::group::{}" \;
|
|
||||||
-exec cat {} \;
|
|
||||||
-exec echo "::endgroup::" \;
|
|
||||||
|| true
|
|
||||||
|
|
||||||
trial-olddeps:
|
|
||||||
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: Test with old deps
|
|
||||||
uses: docker://ubuntu:bionic # For old python and sqlite
|
|
||||||
with:
|
|
||||||
workdir: /github/workspace
|
|
||||||
entrypoint: .buildkite/scripts/test_old_deps.sh
|
|
||||||
env:
|
|
||||||
TRIAL_FLAGS: "--jobs=2"
|
|
||||||
- name: Dump logs
|
|
||||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
|
||||||
# This keeps logs colocated with failing jobs
|
|
||||||
# It also ignores find's exit code; this is a best effort affair
|
|
||||||
run: >-
|
|
||||||
find _trial_temp -name '*.log'
|
|
||||||
-exec echo "::group::{}" \;
|
|
||||||
-exec cat {} \;
|
|
||||||
-exec echo "::endgroup::" \;
|
|
||||||
|| true
|
|
||||||
|
|
||||||
trial-pypy:
|
|
||||||
# Very slow; only run if the branch name includes 'pypy'
|
|
||||||
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
python-version: ["pypy-3.6"]
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
- run: pip install tox
|
|
||||||
- run: tox -e py,combine
|
|
||||||
env:
|
|
||||||
TRIAL_FLAGS: "--jobs=2"
|
|
||||||
- name: Dump logs
|
|
||||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
|
||||||
# This keeps logs colocated with failing jobs
|
|
||||||
# It also ignores find's exit code; this is a best effort affair
|
|
||||||
run: >-
|
|
||||||
find _trial_temp -name '*.log'
|
|
||||||
-exec echo "::group::{}" \;
|
|
||||||
-exec cat {} \;
|
|
||||||
-exec echo "::endgroup::" \;
|
|
||||||
|| true
|
|
||||||
|
|
||||||
sytest:
|
|
||||||
if: ${{ !failure() && !cancelled() }}
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container:
|
|
||||||
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
|
|
||||||
volumes:
|
|
||||||
- ${{ github.workspace }}:/src
|
|
||||||
env:
|
|
||||||
BUILDKITE_BRANCH: ${{ github.head_ref }}
|
|
||||||
POSTGRES: ${{ matrix.postgres && 1}}
|
|
||||||
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
|
|
||||||
WORKERS: ${{ matrix.workers && 1 }}
|
|
||||||
REDIS: ${{ matrix.redis && 1 }}
|
|
||||||
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
|
|
||||||
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- sytest-tag: bionic
|
|
||||||
|
|
||||||
- sytest-tag: bionic
|
|
||||||
postgres: postgres
|
|
||||||
|
|
||||||
- sytest-tag: testing
|
|
||||||
postgres: postgres
|
|
||||||
|
|
||||||
- sytest-tag: bionic
|
|
||||||
postgres: multi-postgres
|
|
||||||
workers: workers
|
|
||||||
|
|
||||||
- sytest-tag: buster
|
|
||||||
postgres: multi-postgres
|
|
||||||
workers: workers
|
|
||||||
|
|
||||||
- sytest-tag: buster
|
|
||||||
postgres: postgres
|
|
||||||
workers: workers
|
|
||||||
redis: redis
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: Prepare test blacklist
|
|
||||||
run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
|
|
||||||
- name: Run SyTest
|
|
||||||
run: /bootstrap.sh synapse
|
|
||||||
working-directory: /src
|
|
||||||
- name: Summarise results.tap
|
|
||||||
if: ${{ always() }}
|
|
||||||
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
|
|
||||||
- name: Upload SyTest logs
|
|
||||||
uses: actions/upload-artifact@v2
|
|
||||||
if: ${{ always() }}
|
|
||||||
with:
|
|
||||||
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
|
|
||||||
path: |
|
|
||||||
/logs/results.tap
|
|
||||||
/logs/**/*.log*
|
|
||||||
|
|
||||||
portdb:
|
|
||||||
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- python-version: "3.6"
|
|
||||||
postgres-version: "9.6"
|
|
||||||
|
|
||||||
- python-version: "3.9"
|
|
||||||
postgres-version: "13"
|
|
||||||
|
|
||||||
services:
|
|
||||||
postgres:
|
|
||||||
image: postgres:${{ matrix.postgres-version }}
|
|
||||||
ports:
|
|
||||||
- 5432:5432
|
|
||||||
env:
|
|
||||||
POSTGRES_PASSWORD: "postgres"
|
|
||||||
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
|
|
||||||
options: >-
|
|
||||||
--health-cmd pg_isready
|
|
||||||
--health-interval 10s
|
|
||||||
--health-timeout 5s
|
|
||||||
--health-retries 5
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- run: sudo apt-get -qq install xmlsec1
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
- name: Patch Buildkite-specific test scripts
|
|
||||||
run: |
|
|
||||||
sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
|
|
||||||
sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
|
|
||||||
sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
|
|
||||||
sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
|
|
||||||
- run: .buildkite/scripts/test_synapse_port_db.sh
|
|
||||||
|
|
||||||
complement:
|
|
||||||
if: ${{ !failure() && !cancelled() }}
|
|
||||||
needs: linting-done
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container:
|
|
||||||
# https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
|
|
||||||
image: matrixdotorg/complement:latest
|
|
||||||
env:
|
|
||||||
CI: true
|
|
||||||
ports:
|
|
||||||
- 8448:8448
|
|
||||||
volumes:
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Run actions/checkout@v2 for synapse
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
path: synapse
|
|
||||||
|
|
||||||
# Attempt to check out the same branch of Complement as the PR. If it
|
|
||||||
# doesn't exist, fallback to master.
|
|
||||||
- name: Checkout complement
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
mkdir -p complement
|
|
||||||
# Attempt to use the version of complement which best matches the current
|
|
||||||
# build. Depending on whether this is a PR or release, etc. we need to
|
|
||||||
# use different fallbacks.
|
|
||||||
#
|
|
||||||
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
|
|
||||||
# for pull requests, otherwise GITHUB_REF).
|
|
||||||
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
|
|
||||||
# (GITHUB_BASE_REF for pull requests).
|
|
||||||
# 3. Use the default complement branch ("master").
|
|
||||||
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
|
|
||||||
# Skip empty branch names and merge commits.
|
|
||||||
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
|
|
||||||
done
|
|
||||||
|
|
||||||
# Build initial Synapse image
|
|
||||||
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
|
|
||||||
working-directory: synapse
|
|
||||||
|
|
||||||
# Build a ready-to-run Synapse image based on the initial image above.
|
|
||||||
# This new image includes a config file, keys for signing and TLS, and
|
|
||||||
# other settings to make it suitable for testing under Complement.
|
|
||||||
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
|
|
||||||
working-directory: complement/dockerfiles
|
|
||||||
|
|
||||||
# Run Complement
|
|
||||||
- run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests
|
|
||||||
env:
|
|
||||||
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
|
|
||||||
working-directory: complement
|
|
||||||
|
|
||||||
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
|
|
||||||
tests-done:
|
|
||||||
needs:
|
|
||||||
- trial
|
|
||||||
- trial-olddeps
|
|
||||||
- sytest
|
|
||||||
- portdb
|
|
||||||
- complement
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- run: "true"
|
|
||||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -6,14 +6,13 @@
|
|||||||
*.egg
|
*.egg
|
||||||
*.egg-info
|
*.egg-info
|
||||||
*.lock
|
*.lock
|
||||||
*.py[cod]
|
*.pyc
|
||||||
*.snap
|
*.snap
|
||||||
*.tac
|
*.tac
|
||||||
_trial_temp/
|
_trial_temp/
|
||||||
_trial_temp*/
|
_trial_temp*/
|
||||||
/out
|
/out
|
||||||
.DS_Store
|
.DS_Store
|
||||||
__pycache__/
|
|
||||||
|
|
||||||
# stuff that is likely to exist when you run a server locally
|
# stuff that is likely to exist when you run a server locally
|
||||||
/*.db
|
/*.db
|
||||||
@@ -46,6 +45,3 @@ __pycache__/
|
|||||||
/docs/build/
|
/docs/build/
|
||||||
/htmlcov
|
/htmlcov
|
||||||
/pip-wheel-metadata/
|
/pip-wheel-metadata/
|
||||||
|
|
||||||
# docs
|
|
||||||
book/
|
|
||||||
|
|||||||
1312
CHANGES.md
1312
CHANGES.md
File diff suppressed because it is too large
Load Diff
278
CONTRIBUTING.md
278
CONTRIBUTING.md
@@ -1,31 +1,4 @@
|
|||||||
Welcome to Synapse
|
# Contributing code to Synapse
|
||||||
|
|
||||||
This document aims to get you started with contributing to this repo!
|
|
||||||
|
|
||||||
- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
|
|
||||||
- [2. What do I need?](#2-what-do-i-need)
|
|
||||||
- [3. Get the source.](#3-get-the-source)
|
|
||||||
- [4. Install the dependencies](#4-install-the-dependencies)
|
|
||||||
* [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
|
|
||||||
* [Under Windows](#under-windows)
|
|
||||||
- [5. Get in touch.](#5-get-in-touch)
|
|
||||||
- [6. Pick an issue.](#6-pick-an-issue)
|
|
||||||
- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
|
|
||||||
- [8. Test, test, test!](#8-test-test-test)
|
|
||||||
* [Run the linters.](#run-the-linters)
|
|
||||||
* [Run the unit tests.](#run-the-unit-tests)
|
|
||||||
* [Run the integration tests.](#run-the-integration-tests)
|
|
||||||
- [9. Submit your patch.](#9-submit-your-patch)
|
|
||||||
* [Changelog](#changelog)
|
|
||||||
+ [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
|
|
||||||
+ [Debian changelog](#debian-changelog)
|
|
||||||
* [Sign off](#sign-off)
|
|
||||||
- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
|
|
||||||
- [11. Find a new issue.](#11-find-a-new-issue)
|
|
||||||
- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
|
|
||||||
- [Conclusion](#conclusion)
|
|
||||||
|
|
||||||
# 1. Who can contribute to Synapse?
|
|
||||||
|
|
||||||
Everyone is welcome to contribute code to [matrix.org
|
Everyone is welcome to contribute code to [matrix.org
|
||||||
projects](https://github.com/matrix-org), provided that they are willing to
|
projects](https://github.com/matrix-org), provided that they are willing to
|
||||||
@@ -36,186 +9,70 @@ license the code under the same terms as the project's overall 'outbound'
|
|||||||
license - in our case, this is almost always Apache Software License v2 (see
|
license - in our case, this is almost always Apache Software License v2 (see
|
||||||
[LICENSE](LICENSE)).
|
[LICENSE](LICENSE)).
|
||||||
|
|
||||||
# 2. What do I need?
|
## How to contribute
|
||||||
|
|
||||||
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
|
|
||||||
|
|
||||||
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
|
|
||||||
|
|
||||||
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
|
|
||||||
|
|
||||||
|
|
||||||
# 3. Get the source.
|
|
||||||
|
|
||||||
The preferred and easiest way to contribute changes is to fork the relevant
|
The preferred and easiest way to contribute changes is to fork the relevant
|
||||||
project on GitHub, and then [create a pull request](
|
project on github, and then [create a pull request](
|
||||||
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
|
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
|
||||||
changes into our repo.
|
changes into our repo.
|
||||||
|
|
||||||
Please base your changes on the `develop` branch.
|
Some other points to follow:
|
||||||
|
|
||||||
```sh
|
* Please base your changes on the `develop` branch.
|
||||||
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
|
|
||||||
git checkout develop
|
|
||||||
```
|
|
||||||
|
|
||||||
If you need help getting started with git, this is beyond the scope of the document, but you
|
* Please follow the [code style requirements](#code-style).
|
||||||
can find many good git tutorials on the web.
|
|
||||||
|
|
||||||
# 4. Install the dependencies
|
* Please include a [changelog entry](#changelog) with each PR.
|
||||||
|
|
||||||
## Under Unix (macOS, Linux, BSD, ...)
|
* Please [sign off](#sign-off) your contribution.
|
||||||
|
|
||||||
Once you have installed Python 3 and added the source, please open a terminal and
|
* Please keep an eye on the pull request for feedback from the [continuous
|
||||||
setup a *virtualenv*, as follows:
|
integration system](#continuous-integration-and-testing) and try to fix any
|
||||||
|
errors that come up.
|
||||||
|
|
||||||
```sh
|
* If you need to [update your PR](#updating-your-pull-request), just add new
|
||||||
cd path/where/you/have/cloned/the/repository
|
commits to your branch rather than rebasing.
|
||||||
python3 -m venv ./env
|
|
||||||
source ./env/bin/activate
|
|
||||||
pip install -e ".[all,lint,mypy,test]"
|
|
||||||
pip install tox
|
|
||||||
```
|
|
||||||
|
|
||||||
This will install the developer dependencies for the project.
|
## Code style
|
||||||
|
|
||||||
## Under Windows
|
|
||||||
|
|
||||||
TBD
|
|
||||||
|
|
||||||
|
|
||||||
# 5. Get in touch.
|
|
||||||
|
|
||||||
Join our developer community on Matrix: #synapse-dev:matrix.org !
|
|
||||||
|
|
||||||
|
|
||||||
# 6. Pick an issue.
|
|
||||||
|
|
||||||
Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
|
|
||||||
to work on.
|
|
||||||
|
|
||||||
|
|
||||||
# 7. Turn coffee and documentation into code and documentation!
|
|
||||||
|
|
||||||
Synapse's code style is documented [here](docs/code_style.md). Please follow
|
Synapse's code style is documented [here](docs/code_style.md). Please follow
|
||||||
it, including the conventions for the [sample configuration
|
it, including the conventions for the [sample configuration
|
||||||
file](docs/code_style.md#configuration-file-format).
|
file](docs/code_style.md#configuration-file-format).
|
||||||
|
|
||||||
There is a growing amount of documentation located in the [docs](docs)
|
Many of the conventions are enforced by scripts which are run as part of the
|
||||||
directory. This documentation is intended primarily for sysadmins running their
|
[continuous integration system](#continuous-integration-and-testing). To help
|
||||||
own Synapse instance, as well as developers interacting externally with
|
check if you have followed the code style, you can run `scripts-dev/lint.sh`
|
||||||
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
|
locally. You'll need python 3.6 or later, and to install a number of tools:
|
||||||
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
|
|
||||||
regarding Synapse's Admin API, which is used mostly by sysadmins and external
|
|
||||||
service developers.
|
|
||||||
|
|
||||||
If you add new files added to either of these folders, please use [GitHub-Flavoured
|
```
|
||||||
Markdown](https://guides.github.com/features/mastering-markdown/).
|
# Install the dependencies
|
||||||
|
pip install -e ".[lint,mypy]"
|
||||||
|
|
||||||
Some documentation also exists in [Synapse's GitHub
|
# Run the linter script
|
||||||
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
|
|
||||||
contributed to by community authors.
|
|
||||||
|
|
||||||
|
|
||||||
# 8. Test, test, test!
|
|
||||||
<a name="test-test-test"></a>
|
|
||||||
|
|
||||||
While you're developing and before submitting a patch, you'll
|
|
||||||
want to test your code.
|
|
||||||
|
|
||||||
## Run the linters.
|
|
||||||
|
|
||||||
The linters look at your code and do two things:
|
|
||||||
|
|
||||||
- ensure that your code follows the coding style adopted by the project;
|
|
||||||
- catch a number of errors in your code.
|
|
||||||
|
|
||||||
They're pretty fast, don't hesitate!
|
|
||||||
|
|
||||||
```sh
|
|
||||||
source ./env/bin/activate
|
|
||||||
./scripts-dev/lint.sh
|
./scripts-dev/lint.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that this script *will modify your files* to fix styling errors.
|
**Note that the script does not just test/check, but also reformats code, so you
|
||||||
Make sure that you have saved all your files.
|
may wish to ensure any new code is committed first**.
|
||||||
|
|
||||||
If you wish to restrict the linters to only the files changed since the last commit
|
By default, this script checks all files and can take some time; if you alter
|
||||||
(much faster!), you can instead run:
|
only certain files, you might wish to specify paths as arguments to reduce the
|
||||||
|
run-time:
|
||||||
|
|
||||||
```sh
|
|
||||||
source ./env/bin/activate
|
|
||||||
./scripts-dev/lint.sh -d
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Or if you know exactly which files you wish to lint, you can instead run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
source ./env/bin/activate
|
|
||||||
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
|
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
|
||||||
```
|
```
|
||||||
|
|
||||||
## Run the unit tests.
|
You can also provide the `-d` option, which will lint the files that have been
|
||||||
|
changed since the last git commit. This will often be significantly faster than
|
||||||
|
linting the whole codebase.
|
||||||
|
|
||||||
The unit tests run parts of Synapse, including your changes, to see if anything
|
Before pushing new changes, ensure they don't produce linting errors. Commit any
|
||||||
was broken. They are slower than the linters but will typically catch more errors.
|
files that were corrected.
|
||||||
|
|
||||||
```sh
|
|
||||||
source ./env/bin/activate
|
|
||||||
trial tests
|
|
||||||
```
|
|
||||||
|
|
||||||
If you wish to only run *some* unit tests, you may specify
|
|
||||||
another module instead of `tests` - or a test class or a method:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
source ./env/bin/activate
|
|
||||||
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
|
|
||||||
```
|
|
||||||
|
|
||||||
If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
|
|
||||||
|
|
||||||
```sh
|
|
||||||
less _trial_temp/test.log
|
|
||||||
```
|
|
||||||
|
|
||||||
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Run the integration tests.
|
|
||||||
|
|
||||||
The integration tests are a more comprehensive suite of tests. They
|
|
||||||
run a full version of Synapse, including your changes, to check if
|
|
||||||
anything was broken. They are slower than the unit tests but will
|
|
||||||
typically catch more errors.
|
|
||||||
|
|
||||||
The following command will let you run the integration test with the most common
|
|
||||||
configuration:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
|
|
||||||
```
|
|
||||||
|
|
||||||
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
|
|
||||||
|
|
||||||
|
|
||||||
# 9. Submit your patch.
|
|
||||||
|
|
||||||
Once you're happy with your patch, it's time to prepare a Pull Request.
|
|
||||||
|
|
||||||
To prepare a Pull Request, please:
|
|
||||||
|
|
||||||
1. verify that [all the tests pass](#test-test-test), including the coding style;
|
|
||||||
2. [sign off](#sign-off) your contribution;
|
|
||||||
3. `git push` your commit to your fork of Synapse;
|
|
||||||
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
|
|
||||||
5. add a [changelog entry](#changelog) and push it to your Pull Request;
|
|
||||||
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
|
|
||||||
|
|
||||||
|
Please ensure your changes match the cosmetic style of the existing project,
|
||||||
|
and **never** mix cosmetic and functional changes in the same commit, as it
|
||||||
|
makes it horribly hard to review otherwise.
|
||||||
|
|
||||||
## Changelog
|
## Changelog
|
||||||
|
|
||||||
@@ -299,6 +156,24 @@ directory, you will need both a regular newsfragment *and* an entry in the
|
|||||||
debian changelog. (Though typically such changes should be submitted as two
|
debian changelog. (Though typically such changes should be submitted as two
|
||||||
separate pull requests.)
|
separate pull requests.)
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
There is a growing amount of documentation located in the [docs](docs)
|
||||||
|
directory. This documentation is intended primarily for sysadmins running their
|
||||||
|
own Synapse instance, as well as developers interacting externally with
|
||||||
|
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
|
||||||
|
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
|
||||||
|
regarding Synapse's Admin API, which is used mostly by sysadmins and external
|
||||||
|
service developers.
|
||||||
|
|
||||||
|
New files added to both folders should be written in [Github-Flavoured
|
||||||
|
Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
|
||||||
|
should be made to migrate existing documents to markdown where possible.
|
||||||
|
|
||||||
|
Some documentation also exists in [Synapse's Github
|
||||||
|
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
|
||||||
|
contributed to by community authors.
|
||||||
|
|
||||||
## Sign off
|
## Sign off
|
||||||
|
|
||||||
In order to have a concrete record that your contribution is intentional
|
In order to have a concrete record that your contribution is intentional
|
||||||
@@ -365,36 +240,47 @@ Git allows you to add this signoff automatically when using the `-s`
|
|||||||
flag to `git commit`, which uses the name and email set in your
|
flag to `git commit`, which uses the name and email set in your
|
||||||
`user.name` and `user.email` git configs.
|
`user.name` and `user.email` git configs.
|
||||||
|
|
||||||
|
## Continuous integration and testing
|
||||||
|
|
||||||
# 10. Turn feedback into better code.
|
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
|
||||||
|
run a series of checks and tests against any PR which is opened against the
|
||||||
|
project; if your change breaks the build, this will be shown in GitHub, with
|
||||||
|
links to the build results. If your build fails, please try to fix the errors
|
||||||
|
and update your branch.
|
||||||
|
|
||||||
Once the Pull Request is opened, you will see a few things:
|
To run unit tests in a local development environment, you can use:
|
||||||
|
|
||||||
1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
|
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
|
||||||
2. one or more of the developers will take a look at your Pull Request and offer feedback.
|
for SQLite-backed Synapse on Python 3.5.
|
||||||
|
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
|
||||||
|
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
|
||||||
|
(requires a running local PostgreSQL with access to create databases).
|
||||||
|
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
|
||||||
|
(requires Docker). Entirely self-contained, recommended if you don't want to
|
||||||
|
set up PostgreSQL yourself.
|
||||||
|
|
||||||
From this point, you should:
|
Docker images are available for running the integration tests (SyTest) locally,
|
||||||
|
see the [documentation in the SyTest repo](
|
||||||
|
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
|
||||||
|
information.
|
||||||
|
|
||||||
1. Look at the results of the CI pipeline.
|
## Updating your pull request
|
||||||
- If there is any error, fix the error.
|
|
||||||
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
|
|
||||||
3. Create a new commit with the changes.
|
|
||||||
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
|
|
||||||
- Push this commits to your Pull Request.
|
|
||||||
4. Back to 1.
|
|
||||||
|
|
||||||
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
|
If you decide to make changes to your pull request - perhaps to address issues
|
||||||
|
raised in a review, or to fix problems highlighted by [continuous
|
||||||
|
integration](#continuous-integration-and-testing) - just add new commits to your
|
||||||
|
branch, and push to GitHub. The pull request will automatically be updated.
|
||||||
|
|
||||||
# 11. Find a new issue.
|
Please **avoid** rebasing your branch, especially once the PR has been
|
||||||
|
reviewed: doing so makes it very difficult for a reviewer to see what has
|
||||||
|
changed since a previous review.
|
||||||
|
|
||||||
By now, you know the drill!
|
## Notes for maintainers on merging PRs etc
|
||||||
|
|
||||||
# Notes for maintainers on merging PRs etc
|
|
||||||
|
|
||||||
There are some notes for those with commit access to the project on how we
|
There are some notes for those with commit access to the project on how we
|
||||||
manage git [here](docs/dev/git.md).
|
manage git [here](docs/dev/git.md).
|
||||||
|
|
||||||
# Conclusion
|
## Conclusion
|
||||||
|
|
||||||
That's it! Matrix is a very open and collaborative project as you might expect
|
That's it! Matrix is a very open and collaborative project as you might expect
|
||||||
given our obsession with open communication. If we're going to successfully
|
given our obsession with open communication. If we're going to successfully
|
||||||
|
|||||||
597
INSTALL.md
597
INSTALL.md
@@ -1,7 +1,596 @@
|
|||||||
# Installation Instructions
|
# Installation Instructions
|
||||||
|
|
||||||
This document has moved to the
|
There are 3 steps to follow under **Installation Instructions**.
|
||||||
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
|
|
||||||
Please update your links.
|
|
||||||
|
|
||||||
The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).
|
- [Installation Instructions](#installation-instructions)
|
||||||
|
- [Choosing your server name](#choosing-your-server-name)
|
||||||
|
- [Installing Synapse](#installing-synapse)
|
||||||
|
- [Installing from source](#installing-from-source)
|
||||||
|
- [Platform-Specific Instructions](#platform-specific-instructions)
|
||||||
|
- [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
|
||||||
|
- [ArchLinux](#archlinux)
|
||||||
|
- [CentOS/Fedora](#centosfedora)
|
||||||
|
- [macOS](#macos)
|
||||||
|
- [OpenSUSE](#opensuse)
|
||||||
|
- [OpenBSD](#openbsd)
|
||||||
|
- [Windows](#windows)
|
||||||
|
- [Prebuilt packages](#prebuilt-packages)
|
||||||
|
- [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
|
||||||
|
- [Debian/Ubuntu](#debianubuntu)
|
||||||
|
- [Matrix.org packages](#matrixorg-packages)
|
||||||
|
- [Downstream Debian packages](#downstream-debian-packages)
|
||||||
|
- [Downstream Ubuntu packages](#downstream-ubuntu-packages)
|
||||||
|
- [Fedora](#fedora)
|
||||||
|
- [OpenSUSE](#opensuse-1)
|
||||||
|
- [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
|
||||||
|
- [ArchLinux](#archlinux-1)
|
||||||
|
- [Void Linux](#void-linux)
|
||||||
|
- [FreeBSD](#freebsd)
|
||||||
|
- [OpenBSD](#openbsd-1)
|
||||||
|
- [NixOS](#nixos)
|
||||||
|
- [Setting up Synapse](#setting-up-synapse)
|
||||||
|
- [Using PostgreSQL](#using-postgresql)
|
||||||
|
- [TLS certificates](#tls-certificates)
|
||||||
|
- [Client Well-Known URI](#client-well-known-uri)
|
||||||
|
- [Email](#email)
|
||||||
|
- [Registering a user](#registering-a-user)
|
||||||
|
- [Setting up a TURN server](#setting-up-a-turn-server)
|
||||||
|
- [URL previews](#url-previews)
|
||||||
|
- [Troubleshooting Installation](#troubleshooting-installation)
|
||||||
|
|
||||||
|
## Choosing your server name
|
||||||
|
|
||||||
|
It is important to choose the name for your server before you install Synapse,
|
||||||
|
because it cannot be changed later.
|
||||||
|
|
||||||
|
The server name determines the "domain" part of user-ids for users on your
|
||||||
|
server: these will all be of the format `@user:my.domain.name`. It also
|
||||||
|
determines how other matrix servers will reach yours for federation.
|
||||||
|
|
||||||
|
For a test configuration, set this to the hostname of your server. For a more
|
||||||
|
production-ready setup, you will probably want to specify your domain
|
||||||
|
(`example.com`) rather than a matrix-specific hostname here (in the same way
|
||||||
|
that your email address is probably `user@example.com` rather than
|
||||||
|
`user@email.example.com`) - but doing so may require more advanced setup: see
|
||||||
|
[Setting up Federation](docs/federate.md).
|
||||||
|
|
||||||
|
## Installing Synapse
|
||||||
|
|
||||||
|
### Installing from source
|
||||||
|
|
||||||
|
(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
|
||||||
|
|
||||||
|
System requirements:
|
||||||
|
|
||||||
|
- POSIX-compliant system (tested on Linux & OS X)
|
||||||
|
- Python 3.5.2 or later, up to Python 3.9.
|
||||||
|
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||||
|
|
||||||
|
Synapse is written in Python but some of the libraries it uses are written in
|
||||||
|
C. So before we can install Synapse itself we need a working C compiler and the
|
||||||
|
header files for Python C extensions. See [Platform-Specific
|
||||||
|
Instructions](#platform-specific-instructions) for information on installing
|
||||||
|
these on various platforms.
|
||||||
|
|
||||||
|
To install the Synapse homeserver run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mkdir -p ~/synapse
|
||||||
|
virtualenv -p python3 ~/synapse/env
|
||||||
|
source ~/synapse/env/bin/activate
|
||||||
|
pip install --upgrade pip
|
||||||
|
pip install --upgrade setuptools
|
||||||
|
pip install matrix-synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
|
||||||
|
and install it, along with the python libraries it uses, into a virtual environment
|
||||||
|
under `~/synapse/env`. Feel free to pick a different directory if you
|
||||||
|
prefer.
|
||||||
|
|
||||||
|
This Synapse installation can then be later upgraded by using pip again with the
|
||||||
|
update flag:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
source ~/synapse/env/bin/activate
|
||||||
|
pip install -U matrix-synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
Before you can start Synapse, you will need to generate a configuration
|
||||||
|
file. To do this, run (in your virtualenv, as before):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd ~/synapse
|
||||||
|
python -m synapse.app.homeserver \
|
||||||
|
--server-name my.domain.name \
|
||||||
|
--config-path homeserver.yaml \
|
||||||
|
--generate-config \
|
||||||
|
--report-stats=[yes|no]
|
||||||
|
```
|
||||||
|
|
||||||
|
... substituting an appropriate value for `--server-name`.
|
||||||
|
|
||||||
|
This command will generate you a config file that you can then customise, but it will
|
||||||
|
also generate a set of keys for you. These keys will allow your homeserver to
|
||||||
|
identify itself to other homeserver, so don't lose or delete them. It would be
|
||||||
|
wise to back them up somewhere safe. (If, for whatever reason, you do need to
|
||||||
|
change your homeserver's keys, you may find that other homeserver have the
|
||||||
|
old key cached. If you update the signing key, you should change the name of the
|
||||||
|
key in the `<server name>.signing.key` file (the second word) to something
|
||||||
|
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
|
||||||
|
|
||||||
|
To actually run your new homeserver, pick a working directory for Synapse to
|
||||||
|
run (e.g. `~/synapse`), and:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd ~/synapse
|
||||||
|
source env/bin/activate
|
||||||
|
synctl start
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Platform-Specific Instructions
|
||||||
|
|
||||||
|
##### Debian/Ubuntu/Raspbian
|
||||||
|
|
||||||
|
Installing prerequisites on Ubuntu or Debian:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt install build-essential python3-dev libffi-dev \
|
||||||
|
python3-pip python3-setuptools sqlite3 \
|
||||||
|
libssl-dev virtualenv libjpeg-dev libxslt1-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
##### ArchLinux
|
||||||
|
|
||||||
|
Installing prerequisites on ArchLinux:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo pacman -S base-devel python python-pip \
|
||||||
|
python-setuptools python-virtualenv sqlite3
|
||||||
|
```
|
||||||
|
|
||||||
|
##### CentOS/Fedora
|
||||||
|
|
||||||
|
Installing prerequisites on CentOS 8 or Fedora>26:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||||
|
libwebp-devel tk-devel redhat-rpm-config \
|
||||||
|
python3-virtualenv libffi-devel openssl-devel
|
||||||
|
sudo dnf groupinstall "Development Tools"
|
||||||
|
```
|
||||||
|
|
||||||
|
Installing prerequisites on CentOS 7 or Fedora<=25:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||||
|
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
|
||||||
|
python3-virtualenv libffi-devel openssl-devel
|
||||||
|
sudo yum groupinstall "Development Tools"
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
|
||||||
|
uses SQLite 3.7. You may be able to work around this by installing a more
|
||||||
|
recent SQLite version, but it is recommended that you instead use a Postgres
|
||||||
|
database: see [docs/postgres.md](docs/postgres.md).
|
||||||
|
|
||||||
|
##### macOS
|
||||||
|
|
||||||
|
Installing prerequisites on macOS:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
xcode-select --install
|
||||||
|
sudo easy_install pip
|
||||||
|
sudo pip install virtualenv
|
||||||
|
brew install pkg-config libffi
|
||||||
|
```
|
||||||
|
|
||||||
|
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
|
||||||
|
via brew and inform `pip` about it so that `psycopg2` builds:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
brew install openssl@1.1
|
||||||
|
export LDFLAGS="-L/usr/local/opt/openssl/lib"
|
||||||
|
export CPPFLAGS="-I/usr/local/opt/openssl/include"
|
||||||
|
```
|
||||||
|
|
||||||
|
##### OpenSUSE
|
||||||
|
|
||||||
|
Installing prerequisites on openSUSE:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo zypper in -t pattern devel_basis
|
||||||
|
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
|
||||||
|
python-devel libffi-devel libopenssl-devel libjpeg62-devel
|
||||||
|
```
|
||||||
|
|
||||||
|
##### OpenBSD
|
||||||
|
|
||||||
|
A port of Synapse is available under `net/synapse`. The filesystem
|
||||||
|
underlying the homeserver directory (defaults to `/var/synapse`) has to be
|
||||||
|
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
|
||||||
|
and mounting it to `/var/synapse` should be taken into consideration.
|
||||||
|
|
||||||
|
To be able to build Synapse's dependency on python the `WRKOBJDIR`
|
||||||
|
(cf. `bsd.port.mk(5)`) for building python, too, needs to be on a filesystem
|
||||||
|
mounted with `wxallowed` (cf. `mount(8)`).
|
||||||
|
|
||||||
|
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
|
||||||
|
default OpenBSD installation is mounted with `wxallowed`):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas mkdir /usr/local/pobj_wxallowed
|
||||||
|
```
|
||||||
|
|
||||||
|
Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
|
||||||
|
configured in `/etc/mk.conf`:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
|
||||||
|
```
|
||||||
|
|
||||||
|
Setting the `WRKOBJDIR` for building python:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
Building Synapse:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd /usr/ports/net/synapse
|
||||||
|
make install
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Windows
|
||||||
|
|
||||||
|
If you wish to run or develop Synapse on Windows, the Windows Subsystem For
|
||||||
|
Linux provides a Linux environment on Windows 10 which is capable of using the
|
||||||
|
Debian, Fedora, or source installation methods. More information about WSL can
|
||||||
|
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
|
||||||
|
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
|
||||||
|
for Windows Server.
|
||||||
|
|
||||||
|
### Prebuilt packages
|
||||||
|
|
||||||
|
As an alternative to installing from source, prebuilt packages are available
|
||||||
|
for a number of platforms.
|
||||||
|
|
||||||
|
#### Docker images and Ansible playbooks
|
||||||
|
|
||||||
|
There is an official synapse image available at
|
||||||
|
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
|
||||||
|
the docker-compose file available at [contrib/docker](contrib/docker). Further
|
||||||
|
information on this including configuration options is available in the README
|
||||||
|
on hub.docker.com.
|
||||||
|
|
||||||
|
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
|
||||||
|
Dockerfile to automate a synapse server in a single Docker image, at
|
||||||
|
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
|
||||||
|
|
||||||
|
Slavi Pantaleev has created an Ansible playbook,
|
||||||
|
which installs the offical Docker image of Matrix Synapse
|
||||||
|
along with many other Matrix-related services (Postgres database, Element, coturn,
|
||||||
|
ma1sd, SSL support, etc.).
|
||||||
|
For more details, see
|
||||||
|
<https://github.com/spantaleev/matrix-docker-ansible-deploy>
|
||||||
|
|
||||||
|
#### Debian/Ubuntu
|
||||||
|
|
||||||
|
##### Matrix.org packages
|
||||||
|
|
||||||
|
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
|
||||||
|
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
|
||||||
|
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt install -y lsb-release wget apt-transport-https
|
||||||
|
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
|
||||||
|
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
|
||||||
|
sudo tee /etc/apt/sources.list.d/matrix-org.list
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install matrix-synapse-py3
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note**: if you followed a previous version of these instructions which
|
||||||
|
recommended using `apt-key add` to add an old key from
|
||||||
|
`https://matrix.org/packages/debian/`, you should note that this key has been
|
||||||
|
revoked. You should remove the old key with `sudo apt-key remove
|
||||||
|
C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
|
||||||
|
update your configuration.
|
||||||
|
|
||||||
|
The fingerprint of the repository signing key (as shown by `gpg
|
||||||
|
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
|
||||||
|
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
|
||||||
|
|
||||||
|
##### Downstream Debian packages
|
||||||
|
|
||||||
|
We do not recommend using the packages from the default Debian `buster`
|
||||||
|
repository at this time, as they are old and suffer from known security
|
||||||
|
vulnerabilities. You can install the latest version of Synapse from
|
||||||
|
[our repository](#matrixorg-packages) or from `buster-backports`. Please
|
||||||
|
see the [Debian documentation](https://backports.debian.org/Instructions/)
|
||||||
|
for information on how to use backports.
|
||||||
|
|
||||||
|
If you are using Debian `sid` or testing, Synapse is available in the default
|
||||||
|
repositories and it should be possible to install it simply with:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt install matrix-synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Downstream Ubuntu packages
|
||||||
|
|
||||||
|
We do not recommend using the packages in the default Ubuntu repository
|
||||||
|
at this time, as they are old and suffer from known security vulnerabilities.
|
||||||
|
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
|
||||||
|
|
||||||
|
#### Fedora
|
||||||
|
|
||||||
|
Synapse is in the Fedora repositories as `matrix-synapse`:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo dnf install matrix-synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
Oleg Girko provides Fedora RPMs at
|
||||||
|
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
|
||||||
|
|
||||||
|
#### OpenSUSE
|
||||||
|
|
||||||
|
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo zypper install matrix-synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
#### SUSE Linux Enterprise Server
|
||||||
|
|
||||||
|
Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
|
||||||
|
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
|
||||||
|
|
||||||
|
#### ArchLinux
|
||||||
|
|
||||||
|
The quickest way to get up and running with ArchLinux is probably with the community package
|
||||||
|
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
|
||||||
|
the necessary dependencies.
|
||||||
|
|
||||||
|
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo pip install --upgrade pip
|
||||||
|
```
|
||||||
|
|
||||||
|
If you encounter an error with lib bcrypt causing a Wrong ELF Class:
|
||||||
|
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
|
||||||
|
compile it under the right architecture. (This should not be needed if
|
||||||
|
installing under virtualenv):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo pip uninstall py-bcrypt
|
||||||
|
sudo pip install py-bcrypt
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Void Linux
|
||||||
|
|
||||||
|
Synapse can be found in the void repositories as 'synapse':
|
||||||
|
|
||||||
|
```sh
|
||||||
|
xbps-install -Su
|
||||||
|
xbps-install -S synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
#### FreeBSD
|
||||||
|
|
||||||
|
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
|
||||||
|
|
||||||
|
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
|
||||||
|
- Packages: `pkg install py37-matrix-synapse`
|
||||||
|
|
||||||
|
#### OpenBSD
|
||||||
|
|
||||||
|
As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
|
||||||
|
underlying the homeserver directory (defaults to `/var/synapse`) has to be
|
||||||
|
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
|
||||||
|
and mounting it to `/var/synapse` should be taken into consideration.
|
||||||
|
|
||||||
|
Installing Synapse:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas pkg_add synapse
|
||||||
|
```
|
||||||
|
|
||||||
|
#### NixOS
|
||||||
|
|
||||||
|
Robin Lambertz has packaged Synapse for NixOS at:
|
||||||
|
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
|
||||||
|
|
||||||
|
## Setting up Synapse
|
||||||
|
|
||||||
|
Once you have installed synapse as above, you will need to configure it.
|
||||||
|
|
||||||
|
### Using PostgreSQL
|
||||||
|
|
||||||
|
By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
|
||||||
|
SQLite is only recommended in Synapse for testing purposes or for servers with
|
||||||
|
very light workloads.
|
||||||
|
|
||||||
|
Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
|
||||||
|
|
||||||
|
- significant performance improvements due to the superior threading and
|
||||||
|
caching model, smarter query optimiser
|
||||||
|
- allowing the DB to be run on separate hardware
|
||||||
|
|
||||||
|
For information on how to install and use PostgreSQL in Synapse, please see
|
||||||
|
[docs/postgres.md](docs/postgres.md)
|
||||||
|
|
||||||
|
### TLS certificates
|
||||||
|
|
||||||
|
The default configuration exposes a single HTTP port on the local
|
||||||
|
interface: `http://localhost:8008`. It is suitable for local testing,
|
||||||
|
but for any practical use, you will need Synapse's APIs to be served
|
||||||
|
over HTTPS.
|
||||||
|
|
||||||
|
The recommended way to do so is to set up a reverse proxy on port
|
||||||
|
`8448`. You can find documentation on doing so in
|
||||||
|
[docs/reverse_proxy.md](docs/reverse_proxy.md).
|
||||||
|
|
||||||
|
Alternatively, you can configure Synapse to expose an HTTPS port. To do
|
||||||
|
so, you will need to edit `homeserver.yaml`, as follows:
|
||||||
|
|
||||||
|
- First, under the `listeners` section, uncomment the configuration for the
|
||||||
|
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
|
||||||
|
each line). The relevant lines are like this:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- port: 8448
|
||||||
|
type: http
|
||||||
|
tls: true
|
||||||
|
resources:
|
||||||
|
- names: [client, federation]
|
||||||
|
```
|
||||||
|
|
||||||
|
- You will also need to uncomment the `tls_certificate_path` and
|
||||||
|
`tls_private_key_path` lines under the `TLS` section. You will need to manage
|
||||||
|
provisioning of these certificates yourself — Synapse had built-in ACME
|
||||||
|
support, but the ACMEv1 protocol Synapse implements is deprecated, not
|
||||||
|
allowed by LetsEncrypt for new sites, and will break for existing sites in
|
||||||
|
late 2020. See [ACME.md](docs/ACME.md).
|
||||||
|
|
||||||
|
If you are using your own certificate, be sure to use a `.pem` file that
|
||||||
|
includes the full certificate chain including any intermediate certificates
|
||||||
|
(for instance, if using certbot, use `fullchain.pem` as your certificate, not
|
||||||
|
`cert.pem`).
|
||||||
|
|
||||||
|
For a more detailed guide to configuring your server for federation, see
|
||||||
|
[federate.md](docs/federate.md).
|
||||||
|
|
||||||
|
### Client Well-Known URI
|
||||||
|
|
||||||
|
Setting up the client Well-Known URI is optional but if you set it up, it will
|
||||||
|
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
|
||||||
|
which support well-known lookup to automatically configure the homeserver and
|
||||||
|
identity server URLs. This is useful so that users don't have to memorize or think
|
||||||
|
about the actual homeserver URL you are using.
|
||||||
|
|
||||||
|
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
|
||||||
|
the following format.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"m.homeserver": {
|
||||||
|
"base_url": "https://<matrix.example.com>"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
It can optionally contain identity server information as well.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"m.homeserver": {
|
||||||
|
"base_url": "https://<matrix.example.com>"
|
||||||
|
},
|
||||||
|
"m.identity_server": {
|
||||||
|
"base_url": "https://<identity.example.com>"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To work in browser based clients, the file must be served with the appropriate
|
||||||
|
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
|
||||||
|
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
|
||||||
|
view it.
|
||||||
|
|
||||||
|
In nginx this would be something like:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
location /.well-known/matrix/client {
|
||||||
|
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
|
||||||
|
default_type application/json;
|
||||||
|
add_header Access-Control-Allow-Origin *;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
|
||||||
|
correctly. `public_baseurl` should be set to the URL that clients will use to
|
||||||
|
connect to your server. This is the same URL you put for the `m.homeserver`
|
||||||
|
`base_url` above.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
public_baseurl: "https://<matrix.example.com>"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Email
|
||||||
|
|
||||||
|
It is desirable for Synapse to have the capability to send email. This allows
|
||||||
|
Synapse to send password reset emails, send verifications when an email address
|
||||||
|
is added to a user's account, and send email notifications to users when they
|
||||||
|
receive new messages.
|
||||||
|
|
||||||
|
To configure an SMTP server for Synapse, modify the configuration section
|
||||||
|
headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
|
||||||
|
and `notif_from` fields filled out. You may also need to set `smtp_user`,
|
||||||
|
`smtp_pass`, and `require_transport_security`.
|
||||||
|
|
||||||
|
If email is not configured, password reset, registration and notifications via
|
||||||
|
email will be disabled.
|
||||||
|
|
||||||
|
### Registering a user
|
||||||
|
|
||||||
|
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
|
||||||
|
|
||||||
|
Alternatively you can do so from the command line if you have installed via pip.
|
||||||
|
|
||||||
|
This can be done as follows:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ source ~/synapse/env/bin/activate
|
||||||
|
$ synctl start # if not already running
|
||||||
|
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
|
||||||
|
New user localpart: erikj
|
||||||
|
Password:
|
||||||
|
Confirm password:
|
||||||
|
Make admin [no]:
|
||||||
|
Success!
|
||||||
|
```
|
||||||
|
|
||||||
|
This process uses a setting `registration_shared_secret` in
|
||||||
|
`homeserver.yaml`, which is shared between Synapse itself and the
|
||||||
|
`register_new_matrix_user` script. It doesn't matter what it is (a random
|
||||||
|
value is generated by `--generate-config`), but it should be kept secret, as
|
||||||
|
anyone with knowledge of it can register users, including admin accounts,
|
||||||
|
on your server even if `enable_registration` is `false`.
|
||||||
|
|
||||||
|
### Setting up a TURN server
|
||||||
|
|
||||||
|
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
||||||
|
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
|
||||||
|
|
||||||
|
### URL previews
|
||||||
|
|
||||||
|
Synapse includes support for previewing URLs, which is disabled by default. To
|
||||||
|
turn it on you must enable the `url_preview_enabled: True` config parameter
|
||||||
|
and explicitly specify the IP ranges that Synapse is not allowed to spider for
|
||||||
|
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
|
||||||
|
This is critical from a security perspective to stop arbitrary Matrix users
|
||||||
|
spidering 'internal' URLs on your network. At the very least we recommend that
|
||||||
|
your loopback and RFC1918 IP addresses are blacklisted.
|
||||||
|
|
||||||
|
This also requires the optional `lxml` python dependency to be installed. This
|
||||||
|
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
|
||||||
|
means `apt-get install libxml2-dev`, or equivalent for your OS.
|
||||||
|
|
||||||
|
### Troubleshooting Installation
|
||||||
|
|
||||||
|
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
|
||||||
|
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
|
||||||
|
happens, you will have to individually install the dependencies which are
|
||||||
|
failing, e.g.:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install twisted
|
||||||
|
```
|
||||||
|
|
||||||
|
If you have any other problems, feel free to ask in
|
||||||
|
[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
|
||||||
|
|||||||
@@ -20,10 +20,9 @@ recursive-include scripts *
|
|||||||
recursive-include scripts-dev *
|
recursive-include scripts-dev *
|
||||||
recursive-include synapse *.pyi
|
recursive-include synapse *.pyi
|
||||||
recursive-include tests *.py
|
recursive-include tests *.py
|
||||||
recursive-include tests *.pem
|
include tests/http/ca.crt
|
||||||
recursive-include tests *.p8
|
include tests/http/ca.key
|
||||||
recursive-include tests *.crt
|
include tests/http/server.key
|
||||||
recursive-include tests *.key
|
|
||||||
|
|
||||||
recursive-include synapse/res *
|
recursive-include synapse/res *
|
||||||
recursive-include synapse/static *.css
|
recursive-include synapse/static *.css
|
||||||
@@ -40,7 +39,6 @@ exclude mypy.ini
|
|||||||
exclude sytest-blacklist
|
exclude sytest-blacklist
|
||||||
exclude test_postgresql.sh
|
exclude test_postgresql.sh
|
||||||
|
|
||||||
include book.toml
|
|
||||||
include pyproject.toml
|
include pyproject.toml
|
||||||
recursive-include changelog.d *
|
recursive-include changelog.d *
|
||||||
|
|
||||||
|
|||||||
123
README.rst
123
README.rst
@@ -25,7 +25,7 @@ The overall architecture is::
|
|||||||
|
|
||||||
``#matrix:matrix.org`` is the official support room for Matrix, and can be
|
``#matrix:matrix.org`` is the official support room for Matrix, and can be
|
||||||
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
|
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
|
||||||
via IRC bridge at irc://irc.libera.chat/matrix.
|
via IRC bridge at irc://irc.freenode.net/matrix.
|
||||||
|
|
||||||
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
||||||
is sufficiently stable to be run as an internet-facing service for real usage!
|
is sufficiently stable to be run as an internet-facing service for real usage!
|
||||||
@@ -94,8 +94,7 @@ Synapse Installation
|
|||||||
|
|
||||||
.. _federation:
|
.. _federation:
|
||||||
|
|
||||||
* For details on how to install synapse, see
|
* For details on how to install synapse, see `<INSTALL.md>`_.
|
||||||
`Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
|
|
||||||
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
|
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
|
||||||
|
|
||||||
|
|
||||||
@@ -107,8 +106,7 @@ from a web client.
|
|||||||
|
|
||||||
Unless you are running a test instance of Synapse on your local machine, in
|
Unless you are running a test instance of Synapse on your local machine, in
|
||||||
general, you will need to enable TLS support before you can successfully
|
general, you will need to enable TLS support before you can successfully
|
||||||
connect from a client: see
|
connect from a client: see `<INSTALL.md#tls-certificates>`_.
|
||||||
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
|
|
||||||
|
|
||||||
An easy way to get started is to login or register via Element at
|
An easy way to get started is to login or register via Element at
|
||||||
https://app.element.io/#/login or https://app.element.io/#/register respectively.
|
https://app.element.io/#/login or https://app.element.io/#/register respectively.
|
||||||
@@ -144,55 +142,38 @@ the form of::
|
|||||||
As when logging in, you will need to specify a "Custom server". Specify your
|
As when logging in, you will need to specify a "Custom server". Specify your
|
||||||
desired ``localpart`` in the 'User name' box.
|
desired ``localpart`` in the 'User name' box.
|
||||||
|
|
||||||
Security note
|
ACME setup
|
||||||
|
==========
|
||||||
|
|
||||||
|
For details on having Synapse manage your federation TLS certificates
|
||||||
|
automatically, please see `<docs/ACME.md>`_.
|
||||||
|
|
||||||
|
|
||||||
|
Security Note
|
||||||
=============
|
=============
|
||||||
|
|
||||||
Matrix serves raw, user-supplied data in some APIs -- specifically the `content
|
Matrix serves raw user generated data in some APIs - specifically the `content
|
||||||
repository endpoints`_.
|
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
|
||||||
|
|
||||||
.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
|
Whilst we have tried to mitigate against possible XSS attacks (e.g.
|
||||||
|
https://github.com/matrix-org/synapse/pull/1021) we recommend running
|
||||||
|
matrix homeservers on a dedicated domain name, to limit any malicious user generated
|
||||||
|
content served to web browsers a matrix API from being able to attack webapps hosted
|
||||||
|
on the same domain. This is particularly true of sharing a matrix webclient and
|
||||||
|
server on the same domain.
|
||||||
|
|
||||||
Whilst we make a reasonable effort to mitigate against XSS attacks (for
|
See https://github.com/vector-im/riot-web/issues/1977 and
|
||||||
instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
|
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
|
||||||
domain hosting other web applications. This especially applies to sharing
|
|
||||||
the domain with Matrix web clients and other sensitive applications like
|
|
||||||
webmail. See
|
|
||||||
https://developer.github.com/changes/2014-04-25-user-content-security for more
|
|
||||||
information.
|
|
||||||
|
|
||||||
.. _CSP: https://github.com/matrix-org/synapse/pull/1021
|
|
||||||
|
|
||||||
Ideally, the homeserver should not simply be on a different subdomain, but on
|
|
||||||
a completely different `registered domain`_ (also known as top-level site or
|
|
||||||
eTLD+1). This is because `some attacks`_ are still possible as long as the two
|
|
||||||
applications share the same registered domain.
|
|
||||||
|
|
||||||
.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
|
|
||||||
|
|
||||||
.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
|
|
||||||
|
|
||||||
To illustrate this with an example, if your Element Web or other sensitive web
|
|
||||||
application is hosted on ``A.example1.com``, you should ideally host Synapse on
|
|
||||||
``example2.com``. Some amount of protection is offered by hosting on
|
|
||||||
``B.example1.com`` instead, so this is also acceptable in some scenarios.
|
|
||||||
However, you should *not* host your Synapse on ``A.example1.com``.
|
|
||||||
|
|
||||||
Note that all of the above refers exclusively to the domain used in Synapse's
|
|
||||||
``public_baseurl`` setting. In particular, it has no bearing on the domain
|
|
||||||
mentioned in MXIDs hosted on that server.
|
|
||||||
|
|
||||||
Following this advice ensures that even if an XSS is found in Synapse, the
|
|
||||||
impact to other applications will be minimal.
|
|
||||||
|
|
||||||
|
|
||||||
Upgrading an existing Synapse
|
Upgrading an existing Synapse
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
The instructions for upgrading synapse are in `the upgrade notes`_.
|
The instructions for upgrading synapse are in `UPGRADE.rst`_.
|
||||||
Please check these instructions as upgrading may require extra steps for some
|
Please check these instructions as upgrading may require extra steps for some
|
||||||
versions of synapse.
|
versions of synapse.
|
||||||
|
|
||||||
.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
|
.. _UPGRADE.rst: UPGRADE.rst
|
||||||
|
|
||||||
.. _reverse-proxy:
|
.. _reverse-proxy:
|
||||||
|
|
||||||
@@ -202,9 +183,8 @@ Using a reverse proxy with Synapse
|
|||||||
It is recommended to put a reverse proxy such as
|
It is recommended to put a reverse proxy such as
|
||||||
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||||
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
||||||
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
|
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
|
||||||
`HAProxy <https://www.haproxy.org/>`_ or
|
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||||
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
|
|
||||||
doing so is that it means that you can expose the default https port (443) to
|
doing so is that it means that you can expose the default https port (443) to
|
||||||
Matrix clients without needing to run Synapse with root privileges.
|
Matrix clients without needing to run Synapse with root privileges.
|
||||||
|
|
||||||
@@ -267,7 +247,7 @@ Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix
|
|||||||
|
|
||||||
Before setting up a development environment for synapse, make sure you have the
|
Before setting up a development environment for synapse, make sure you have the
|
||||||
system dependencies (such as the python header files) installed - see
|
system dependencies (such as the python header files) installed - see
|
||||||
`Installing from source <https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source>`_.
|
`Installing from source <INSTALL.md#installing-from-source>`_.
|
||||||
|
|
||||||
To check out a synapse for development, clone the git repo into a working
|
To check out a synapse for development, clone the git repo into a working
|
||||||
directory of your choice::
|
directory of your choice::
|
||||||
@@ -288,6 +268,18 @@ try installing the failing modules individually::
|
|||||||
|
|
||||||
pip install -e "module-name"
|
pip install -e "module-name"
|
||||||
|
|
||||||
|
Once this is done, you may wish to run Synapse's unit tests to
|
||||||
|
check that everything is installed correctly::
|
||||||
|
|
||||||
|
python -m twisted.trial tests
|
||||||
|
|
||||||
|
This should end with a 'PASSED' result (note that exact numbers will
|
||||||
|
differ)::
|
||||||
|
|
||||||
|
Ran 1337 tests in 716.064s
|
||||||
|
|
||||||
|
PASSED (skips=15, successes=1322)
|
||||||
|
|
||||||
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
|
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
|
||||||
|
|
||||||
./demo/start.sh
|
./demo/start.sh
|
||||||
@@ -307,23 +299,6 @@ If you just want to start a single instance of the app and run it directly::
|
|||||||
python -m synapse.app.homeserver --config-path homeserver.yaml
|
python -m synapse.app.homeserver --config-path homeserver.yaml
|
||||||
|
|
||||||
|
|
||||||
Running the unit tests
|
|
||||||
======================
|
|
||||||
|
|
||||||
After getting up and running, you may wish to run Synapse's unit tests to
|
|
||||||
check that everything is installed correctly::
|
|
||||||
|
|
||||||
trial tests
|
|
||||||
|
|
||||||
This should end with a 'PASSED' result (note that exact numbers will
|
|
||||||
differ)::
|
|
||||||
|
|
||||||
Ran 1337 tests in 716.064s
|
|
||||||
|
|
||||||
PASSED (skips=15, successes=1322)
|
|
||||||
|
|
||||||
For more tips on running the unit tests, like running a specific test or
|
|
||||||
to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.
|
|
||||||
|
|
||||||
|
|
||||||
Running the Integration Tests
|
Running the Integration Tests
|
||||||
@@ -335,17 +310,8 @@ access the API as a Matrix client would. It is able to run Synapse directly from
|
|||||||
the source tree, so installation of the server is not required.
|
the source tree, so installation of the server is not required.
|
||||||
|
|
||||||
Testing with SyTest is recommended for verifying that changes related to the
|
Testing with SyTest is recommended for verifying that changes related to the
|
||||||
Client-Server API are functioning correctly. See the `SyTest installation
|
Client-Server API are functioning correctly. See the `installation instructions
|
||||||
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
|
<https://github.com/matrix-org/sytest#installing>`_ for details.
|
||||||
|
|
||||||
|
|
||||||
Platform dependencies
|
|
||||||
=====================
|
|
||||||
|
|
||||||
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
|
|
||||||
and aims to follow supported upstream versions. See the
|
|
||||||
`<docs/deprecation_policy.md>`_ document for more details.
|
|
||||||
|
|
||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
===============
|
===============
|
||||||
@@ -417,17 +383,12 @@ massive excess of outgoing federation requests (see `discussion
|
|||||||
indicate that your server is also issuing far more outgoing federation
|
indicate that your server is also issuing far more outgoing federation
|
||||||
requests than can be accounted for by your users' activity, this is a
|
requests than can be accounted for by your users' activity, this is a
|
||||||
likely cause. The misbehavior can be worked around by setting
|
likely cause. The misbehavior can be worked around by setting
|
||||||
the following in the Synapse config file:
|
``use_presence: false`` in the Synapse config file.
|
||||||
|
|
||||||
.. code-block:: yaml
|
|
||||||
|
|
||||||
presence:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
People can't accept room invitations from me
|
People can't accept room invitations from me
|
||||||
--------------------------------------------
|
--------------------------------------------
|
||||||
|
|
||||||
The typical failure mode here is that you send an invitation to someone
|
The typical failure mode here is that you send an invitation to someone
|
||||||
to join a room or direct chat, but when they go to accept it, they get an
|
to join a room or direct chat, but when they go to accept it, they get an
|
||||||
error (typically along the lines of "Invalid signature"). They might see
|
error (typically along the lines of "Invalid signature"). They might see
|
||||||
something like the following in their logs::
|
something like the following in their logs::
|
||||||
|
|||||||
1148
UPGRADE.rst
1148
UPGRADE.rst
File diff suppressed because it is too large
Load Diff
39
book.toml
39
book.toml
@@ -1,39 +0,0 @@
|
|||||||
# Documentation for possible options in this file is at
|
|
||||||
# https://rust-lang.github.io/mdBook/format/config.html
|
|
||||||
[book]
|
|
||||||
title = "Synapse"
|
|
||||||
authors = ["The Matrix.org Foundation C.I.C."]
|
|
||||||
language = "en"
|
|
||||||
multilingual = false
|
|
||||||
|
|
||||||
# The directory that documentation files are stored in
|
|
||||||
src = "docs"
|
|
||||||
|
|
||||||
[build]
|
|
||||||
# Prevent markdown pages from being automatically generated when they're
|
|
||||||
# linked to in SUMMARY.md
|
|
||||||
create-missing = false
|
|
||||||
|
|
||||||
[output.html]
|
|
||||||
# The URL visitors will be directed to when they try to edit a page
|
|
||||||
edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
|
|
||||||
|
|
||||||
# Remove the numbers that appear before each item in the sidebar, as they can
|
|
||||||
# get quite messy as we nest deeper
|
|
||||||
no-section-label = true
|
|
||||||
|
|
||||||
# The source code URL of the repository
|
|
||||||
git-repository-url = "https://github.com/matrix-org/synapse"
|
|
||||||
|
|
||||||
# The path that the docs are hosted on
|
|
||||||
site-url = "/synapse/"
|
|
||||||
|
|
||||||
# Additional HTML, JS, CSS that's injected into each page of the book.
|
|
||||||
# More information available in docs/website_files/README.md
|
|
||||||
additional-css = [
|
|
||||||
"docs/website_files/table-of-contents.css",
|
|
||||||
"docs/website_files/remove-nav-buttons.css",
|
|
||||||
"docs/website_files/indent-section-headers.css",
|
|
||||||
]
|
|
||||||
additional-js = ["docs/website_files/table-of-contents.js"]
|
|
||||||
theme = "docs/website_files/theme"
|
|
||||||
1
changelog.d/9045.misc
Normal file
1
changelog.d/9045.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Add tests to `test_user.UsersListTestCase` for List Users Admin API.
|
||||||
1
changelog.d/9129.misc
Normal file
1
changelog.d/9129.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Various improvements to the federation client.
|
||||||
1
changelog.d/9135.doc
Normal file
1
changelog.d/9135.doc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Add link to Matrix VoIP tester for turn-howto.
|
||||||
1
changelog.d/9163.bugfix
Normal file
1
changelog.d/9163.bugfix
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Fix a long-standing bug where Synapse would return a 500 error when a thumbnail did not exist (and auto-generation of thumbnails was not enabled).
|
||||||
1
changelog.d/9176.misc
Normal file
1
changelog.d/9176.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Speed up chain cover calculation when persisting a batch of state events at once.
|
||||||
1
changelog.d/9180.misc
Normal file
1
changelog.d/9180.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Add a `long_description_type` to the package metadata.
|
||||||
1
changelog.d/9181.misc
Normal file
1
changelog.d/9181.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Speed up batch insertion when using PostgreSQL.
|
||||||
1
changelog.d/9184.misc
Normal file
1
changelog.d/9184.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Emit an error at startup if different Identity Providers are configured with the same `idp_id`.
|
||||||
1
changelog.d/9188.misc
Normal file
1
changelog.d/9188.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Speed up batch insertion when using PostgreSQL.
|
||||||
1
changelog.d/9189.misc
Normal file
1
changelog.d/9189.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Add an `oidc-` prefix to any `idp_id`s which are given in the `oidc_providers` configuration.
|
||||||
1
changelog.d/9190.misc
Normal file
1
changelog.d/9190.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Improve performance of concurrent use of `StreamIDGenerators`.
|
||||||
1
changelog.d/9191.misc
Normal file
1
changelog.d/9191.misc
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Add some missing source directories to the automatic linting script.
|
||||||
1
changelog.d/9193.bugfix
Normal file
1
changelog.d/9193.bugfix
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Fix receipts or account data not being sent down sync. Introduced in v1.26.0rc1.
|
||||||
1
changelog.d/9195.bugfix
Normal file
1
changelog.d/9195.bugfix
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Fix receipts or account data not being sent down sync. Introduced in v1.26.0rc1.
|
||||||
@@ -24,7 +24,6 @@ import sys
|
|||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
from http import TwistedHttpClient
|
from http import TwistedHttpClient
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
import nacl.encoding
|
import nacl.encoding
|
||||||
import nacl.signing
|
import nacl.signing
|
||||||
@@ -93,7 +92,7 @@ class SynapseCmd(cmd.Cmd):
|
|||||||
return self.config["user"].split(":")[1]
|
return self.config["user"].split(":")[1]
|
||||||
|
|
||||||
def do_config(self, line):
|
def do_config(self, line):
|
||||||
"""Show the config for this client: "config"
|
""" Show the config for this client: "config"
|
||||||
Edit a key value mapping: "config key value" e.g. "config token 1234"
|
Edit a key value mapping: "config key value" e.g. "config token 1234"
|
||||||
Config variables:
|
Config variables:
|
||||||
user: The username to auth with.
|
user: The username to auth with.
|
||||||
@@ -361,7 +360,7 @@ class SynapseCmd(cmd.Cmd):
|
|||||||
print(e)
|
print(e)
|
||||||
|
|
||||||
def do_topic(self, line):
|
def do_topic(self, line):
|
||||||
""" "topic [set|get] <roomid> [<newtopic>]"
|
""""topic [set|get] <roomid> [<newtopic>]"
|
||||||
Set the topic for a room: topic set <roomid> <newtopic>
|
Set the topic for a room: topic set <roomid> <newtopic>
|
||||||
Get the topic for a room: topic get <roomid>
|
Get the topic for a room: topic get <roomid>
|
||||||
"""
|
"""
|
||||||
@@ -691,7 +690,7 @@ class SynapseCmd(cmd.Cmd):
|
|||||||
self._do_presence_state(2, line)
|
self._do_presence_state(2, line)
|
||||||
|
|
||||||
def _parse(self, line, keys, force_keys=False):
|
def _parse(self, line, keys, force_keys=False):
|
||||||
"""Parses the given line.
|
""" Parses the given line.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
line : The line to parse
|
line : The line to parse
|
||||||
@@ -719,10 +718,10 @@ class SynapseCmd(cmd.Cmd):
|
|||||||
method,
|
method,
|
||||||
path,
|
path,
|
||||||
data=None,
|
data=None,
|
||||||
query_params: Optional[dict] = None,
|
query_params={"access_token": None},
|
||||||
alt_text=None,
|
alt_text=None,
|
||||||
):
|
):
|
||||||
"""Runs an HTTP request and pretty prints the output.
|
""" Runs an HTTP request and pretty prints the output.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
method: HTTP method
|
method: HTTP method
|
||||||
@@ -730,8 +729,6 @@ class SynapseCmd(cmd.Cmd):
|
|||||||
data: Raw JSON data if any
|
data: Raw JSON data if any
|
||||||
query_params: dict of query parameters to add to the url
|
query_params: dict of query parameters to add to the url
|
||||||
"""
|
"""
|
||||||
query_params = query_params or {"access_token": None}
|
|
||||||
|
|
||||||
url = self._url() + path
|
url = self._url() + path
|
||||||
if "access_token" in query_params:
|
if "access_token" in query_params:
|
||||||
query_params["access_token"] = self._tok()
|
query_params["access_token"] = self._tok()
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014-2016 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
@@ -15,7 +16,6 @@
|
|||||||
import json
|
import json
|
||||||
import urllib
|
import urllib
|
||||||
from pprint import pformat
|
from pprint import pformat
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
from twisted.internet import defer, reactor
|
from twisted.internet import defer, reactor
|
||||||
from twisted.web.client import Agent, readBody
|
from twisted.web.client import Agent, readBody
|
||||||
@@ -23,10 +23,11 @@ from twisted.web.http_headers import Headers
|
|||||||
|
|
||||||
|
|
||||||
class HttpClient:
|
class HttpClient:
|
||||||
"""Interface for talking json over http"""
|
""" Interface for talking json over http
|
||||||
|
"""
|
||||||
|
|
||||||
def put_json(self, url, data):
|
def put_json(self, url, data):
|
||||||
"""Sends the specifed json data using PUT
|
""" Sends the specifed json data using PUT
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
url (str): The URL to PUT data to.
|
url (str): The URL to PUT data to.
|
||||||
@@ -40,7 +41,7 @@ class HttpClient:
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
def get_json(self, url, args=None):
|
def get_json(self, url, args=None):
|
||||||
"""Gets some json from the given host homeserver and path
|
""" Gets some json from the given host homeserver and path
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
url (str): The URL to GET data from.
|
url (str): The URL to GET data from.
|
||||||
@@ -57,7 +58,7 @@ class HttpClient:
|
|||||||
|
|
||||||
|
|
||||||
class TwistedHttpClient(HttpClient):
|
class TwistedHttpClient(HttpClient):
|
||||||
"""Wrapper around the twisted HTTP client api.
|
""" Wrapper around the twisted HTTP client api.
|
||||||
|
|
||||||
Attributes:
|
Attributes:
|
||||||
agent (twisted.web.client.Agent): The twisted Agent used to send the
|
agent (twisted.web.client.Agent): The twisted Agent used to send the
|
||||||
@@ -85,9 +86,9 @@ class TwistedHttpClient(HttpClient):
|
|||||||
body = yield readBody(response)
|
body = yield readBody(response)
|
||||||
defer.returnValue(json.loads(body))
|
defer.returnValue(json.loads(body))
|
||||||
|
|
||||||
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
|
def _create_put_request(self, url, json_data, headers_dict={}):
|
||||||
"""Wrapper of _create_request to issue a PUT request"""
|
""" Wrapper of _create_request to issue a PUT request
|
||||||
headers_dict = headers_dict or {}
|
"""
|
||||||
|
|
||||||
if "Content-Type" not in headers_dict:
|
if "Content-Type" not in headers_dict:
|
||||||
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
|
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
|
||||||
@@ -96,22 +97,15 @@ class TwistedHttpClient(HttpClient):
|
|||||||
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
|
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
|
||||||
)
|
)
|
||||||
|
|
||||||
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
|
def _create_get_request(self, url, headers_dict={}):
|
||||||
"""Wrapper of _create_request to issue a GET request"""
|
""" Wrapper of _create_request to issue a GET request
|
||||||
return self._create_request("GET", url, headers_dict=headers_dict or {})
|
"""
|
||||||
|
return self._create_request("GET", url, headers_dict=headers_dict)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def do_request(
|
def do_request(
|
||||||
self,
|
self, method, url, data=None, qparams=None, jsonreq=True, headers={}
|
||||||
method,
|
|
||||||
url,
|
|
||||||
data=None,
|
|
||||||
qparams=None,
|
|
||||||
jsonreq=True,
|
|
||||||
headers: Optional[dict] = None,
|
|
||||||
):
|
):
|
||||||
headers = headers or {}
|
|
||||||
|
|
||||||
if qparams:
|
if qparams:
|
||||||
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
|
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
|
||||||
|
|
||||||
@@ -132,12 +126,9 @@ class TwistedHttpClient(HttpClient):
|
|||||||
defer.returnValue(json.loads(body))
|
defer.returnValue(json.loads(body))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _create_request(
|
def _create_request(self, method, url, producer=None, headers_dict={}):
|
||||||
self, method, url, producer=None, headers_dict: Optional[dict] = None
|
""" Creates and sends a request to the given url
|
||||||
):
|
"""
|
||||||
"""Creates and sends a request to the given url"""
|
|
||||||
headers_dict = headers_dict or {}
|
|
||||||
|
|
||||||
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
|
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
|
||||||
|
|
||||||
retries_left = 5
|
retries_left = 5
|
||||||
@@ -194,7 +185,8 @@ class _RawProducer:
|
|||||||
|
|
||||||
|
|
||||||
class _JsonProducer:
|
class _JsonProducer:
|
||||||
"""Used by the twisted http client to create the HTTP body from json"""
|
""" Used by the twisted http client to create the HTTP body from json
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(self, jsn):
|
def __init__(self, jsn):
|
||||||
self.data = jsn
|
self.data = jsn
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ services:
|
|||||||
- POSTGRES_USER=synapse
|
- POSTGRES_USER=synapse
|
||||||
- POSTGRES_PASSWORD=changeme
|
- POSTGRES_PASSWORD=changeme
|
||||||
# ensure the database gets created correctly
|
# ensure the database gets created correctly
|
||||||
# https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
|
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
|
||||||
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
|
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
|
||||||
volumes:
|
volumes:
|
||||||
# You may store the database tables in a local folder..
|
# You may store the database tables in a local folder..
|
||||||
|
|||||||
@@ -46,14 +46,14 @@ class CursesStdIO:
|
|||||||
self.callback = callback
|
self.callback = callback
|
||||||
|
|
||||||
def fileno(self):
|
def fileno(self):
|
||||||
"""We want to select on FD 0"""
|
""" We want to select on FD 0 """
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def connectionLost(self, reason):
|
def connectionLost(self, reason):
|
||||||
self.close()
|
self.close()
|
||||||
|
|
||||||
def print_line(self, text):
|
def print_line(self, text):
|
||||||
"""add a line to the internal list of lines"""
|
""" add a line to the internal list of lines"""
|
||||||
|
|
||||||
self.lines.append(text)
|
self.lines.append(text)
|
||||||
self.redraw()
|
self.redraw()
|
||||||
@@ -63,7 +63,8 @@ class CursesStdIO:
|
|||||||
self.redraw()
|
self.redraw()
|
||||||
|
|
||||||
def redraw(self):
|
def redraw(self):
|
||||||
"""method for redisplaying lines based on internal list of lines"""
|
""" method for redisplaying lines
|
||||||
|
based on internal list of lines """
|
||||||
|
|
||||||
self.stdscr.clear()
|
self.stdscr.clear()
|
||||||
self.paintStatus(self.statusText)
|
self.paintStatus(self.statusText)
|
||||||
@@ -92,7 +93,7 @@ class CursesStdIO:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def doRead(self):
|
def doRead(self):
|
||||||
"""Input is ready!"""
|
""" Input is ready! """
|
||||||
curses.noecho()
|
curses.noecho()
|
||||||
c = self.stdscr.getch() # read a character
|
c = self.stdscr.getch() # read a character
|
||||||
|
|
||||||
@@ -132,7 +133,7 @@ class CursesStdIO:
|
|||||||
return "CursesStdIO"
|
return "CursesStdIO"
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
"""clean up"""
|
""" clean up """
|
||||||
|
|
||||||
curses.nocbreak()
|
curses.nocbreak()
|
||||||
self.stdscr.keypad(0)
|
self.stdscr.keypad(0)
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014-2016 OpenMarket Ltd
|
# Copyright 2014-2016 OpenMarket Ltd
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
@@ -55,7 +56,7 @@ def excpetion_errback(failure):
|
|||||||
|
|
||||||
|
|
||||||
class InputOutput:
|
class InputOutput:
|
||||||
"""This is responsible for basic I/O so that a user can interact with
|
""" This is responsible for basic I/O so that a user can interact with
|
||||||
the example app.
|
the example app.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -67,7 +68,8 @@ class InputOutput:
|
|||||||
self.server = server
|
self.server = server
|
||||||
|
|
||||||
def on_line(self, line):
|
def on_line(self, line):
|
||||||
"""This is where we process commands."""
|
""" This is where we process commands.
|
||||||
|
"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
m = re.match(r"^join (\S+)$", line)
|
m = re.match(r"^join (\S+)$", line)
|
||||||
@@ -131,7 +133,7 @@ class IOLoggerHandler(logging.Handler):
|
|||||||
|
|
||||||
|
|
||||||
class Room:
|
class Room:
|
||||||
"""Used to store (in memory) the current membership state of a room, and
|
""" Used to store (in memory) the current membership state of a room, and
|
||||||
which home servers we should send PDUs associated with the room to.
|
which home servers we should send PDUs associated with the room to.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -146,7 +148,8 @@ class Room:
|
|||||||
self.have_got_metadata = False
|
self.have_got_metadata = False
|
||||||
|
|
||||||
def add_participant(self, participant):
|
def add_participant(self, participant):
|
||||||
"""Someone has joined the room"""
|
""" Someone has joined the room
|
||||||
|
"""
|
||||||
self.participants.add(participant)
|
self.participants.add(participant)
|
||||||
self.invited.discard(participant)
|
self.invited.discard(participant)
|
||||||
|
|
||||||
@@ -157,13 +160,14 @@ class Room:
|
|||||||
self.oldest_server = server
|
self.oldest_server = server
|
||||||
|
|
||||||
def add_invited(self, invitee):
|
def add_invited(self, invitee):
|
||||||
"""Someone has been invited to the room"""
|
""" Someone has been invited to the room
|
||||||
|
"""
|
||||||
self.invited.add(invitee)
|
self.invited.add(invitee)
|
||||||
self.servers.add(origin_from_ucid(invitee))
|
self.servers.add(origin_from_ucid(invitee))
|
||||||
|
|
||||||
|
|
||||||
class HomeServer(ReplicationHandler):
|
class HomeServer(ReplicationHandler):
|
||||||
"""A very basic home server implentation that allows people to join a
|
""" A very basic home server implentation that allows people to join a
|
||||||
room and then invite other people.
|
room and then invite other people.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -177,7 +181,8 @@ class HomeServer(ReplicationHandler):
|
|||||||
self.output = output
|
self.output = output
|
||||||
|
|
||||||
def on_receive_pdu(self, pdu):
|
def on_receive_pdu(self, pdu):
|
||||||
"""We just received a PDU"""
|
""" We just received a PDU
|
||||||
|
"""
|
||||||
pdu_type = pdu.pdu_type
|
pdu_type = pdu.pdu_type
|
||||||
|
|
||||||
if pdu_type == "sy.room.message":
|
if pdu_type == "sy.room.message":
|
||||||
@@ -194,20 +199,23 @@ class HomeServer(ReplicationHandler):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _on_message(self, pdu):
|
def _on_message(self, pdu):
|
||||||
"""We received a message"""
|
""" We received a message
|
||||||
|
"""
|
||||||
self.output.print_line(
|
self.output.print_line(
|
||||||
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
|
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
|
||||||
)
|
)
|
||||||
|
|
||||||
def _on_join(self, context, joinee):
|
def _on_join(self, context, joinee):
|
||||||
"""Someone has joined a room, either a remote user or a local user"""
|
""" Someone has joined a room, either a remote user or a local user
|
||||||
|
"""
|
||||||
room = self._get_or_create_room(context)
|
room = self._get_or_create_room(context)
|
||||||
room.add_participant(joinee)
|
room.add_participant(joinee)
|
||||||
|
|
||||||
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
|
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
|
||||||
|
|
||||||
def _on_invite(self, origin, context, invitee):
|
def _on_invite(self, origin, context, invitee):
|
||||||
"""Someone has been invited"""
|
""" Someone has been invited
|
||||||
|
"""
|
||||||
room = self._get_or_create_room(context)
|
room = self._get_or_create_room(context)
|
||||||
room.add_invited(invitee)
|
room.add_invited(invitee)
|
||||||
|
|
||||||
@@ -220,7 +228,8 @@ class HomeServer(ReplicationHandler):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_message(self, room_name, sender, body):
|
def send_message(self, room_name, sender, body):
|
||||||
"""Send a message to a room!"""
|
""" Send a message to a room!
|
||||||
|
"""
|
||||||
destinations = yield self.get_servers_for_context(room_name)
|
destinations = yield self.get_servers_for_context(room_name)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -238,7 +247,8 @@ class HomeServer(ReplicationHandler):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def join_room(self, room_name, sender, joinee):
|
def join_room(self, room_name, sender, joinee):
|
||||||
"""Join a room!"""
|
""" Join a room!
|
||||||
|
"""
|
||||||
self._on_join(room_name, joinee)
|
self._on_join(room_name, joinee)
|
||||||
|
|
||||||
destinations = yield self.get_servers_for_context(room_name)
|
destinations = yield self.get_servers_for_context(room_name)
|
||||||
@@ -259,7 +269,8 @@ class HomeServer(ReplicationHandler):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def invite_to_room(self, room_name, sender, invitee):
|
def invite_to_room(self, room_name, sender, invitee):
|
||||||
"""Invite someone to a room!"""
|
""" Invite someone to a room!
|
||||||
|
"""
|
||||||
self._on_invite(self.server_name, room_name, invitee)
|
self._on_invite(self.server_name, room_name, invitee)
|
||||||
|
|
||||||
destinations = yield self.get_servers_for_context(room_name)
|
destinations = yield self.get_servers_for_context(room_name)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# Using the Synapse Grafana dashboard
|
# Using the Synapse Grafana dashboard
|
||||||
|
|
||||||
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
|
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
|
||||||
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
|
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
|
||||||
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
|
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
|
||||||
3. Set up required recording rules. [contrib/prometheus](../prometheus)
|
3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -193,12 +193,15 @@ class TrivialXmppClient:
|
|||||||
time.sleep(7)
|
time.sleep(7)
|
||||||
print("SSRC spammer started")
|
print("SSRC spammer started")
|
||||||
while self.running:
|
while self.running:
|
||||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
|
ssrcMsg = (
|
||||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
|
||||||
"nick": self.userId,
|
% {
|
||||||
"assrc": self.ssrcs["audio"],
|
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||||
"vssrc": self.ssrcs["video"],
|
"nick": self.userId,
|
||||||
}
|
"assrc": self.ssrcs["audio"],
|
||||||
|
"vssrc": self.ssrcs["video"],
|
||||||
|
}
|
||||||
|
)
|
||||||
res = self.sendIq(ssrcMsg)
|
res = self.sendIq(ssrcMsg)
|
||||||
print("reply from ssrc announce: ", res)
|
print("reply from ssrc announce: ", res)
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ Add a new job to the main prometheus.yml file:
|
|||||||
```
|
```
|
||||||
|
|
||||||
An example of a Prometheus configuration with workers can be found in
|
An example of a Prometheus configuration with workers can be found in
|
||||||
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
|
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
|
||||||
|
|
||||||
To use `synapse.rules` add
|
To use `synapse.rules` add
|
||||||
|
|
||||||
|
|||||||
@@ -3,9 +3,8 @@ Purge history API examples
|
|||||||
|
|
||||||
# `purge_history.sh`
|
# `purge_history.sh`
|
||||||
|
|
||||||
A bash file, that uses the
|
A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
|
||||||
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
|
purge all messages in a list of rooms up to a certain event. You can select a
|
||||||
to purge all messages in a list of rooms up to a certain event. You can select a
|
|
||||||
timeframe or a number of messages that you want to keep in the room.
|
timeframe or a number of messages that you want to keep in the room.
|
||||||
|
|
||||||
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
|
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
|
||||||
@@ -13,6 +12,5 @@ the script.
|
|||||||
|
|
||||||
# `purge_remote_media.sh`
|
# `purge_remote_media.sh`
|
||||||
|
|
||||||
A bash file, that uses the
|
A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
|
||||||
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
|
purge all old cached remote media.
|
||||||
to purge all old cached remote media.
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
# this script will use the api:
|
# this script will use the api:
|
||||||
# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
|
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
|
||||||
#
|
#
|
||||||
# It will purge all messages in a list of rooms up to a cetrain event
|
# It will purge all messages in a list of rooms up to a cetrain event
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
DOMAIN=yourserver.tld
|
DOMAIN=yourserver.tld
|
||||||
# add this user as admin in your home server:
|
# add this user as admin in your home server:
|
||||||
|
|||||||
@@ -1,3 +1,2 @@
|
|||||||
The documentation for using systemd to manage synapse workers is now part of
|
The documentation for using systemd to manage synapse workers is now part of
|
||||||
the main synapse distribution. See
|
the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).
|
||||||
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).
|
|
||||||
|
|||||||
@@ -2,8 +2,7 @@
|
|||||||
This is a setup for managing synapse with a user contributed systemd unit
|
This is a setup for managing synapse with a user contributed systemd unit
|
||||||
file. It provides a `matrix-synapse` systemd unit file that should be tailored
|
file. It provides a `matrix-synapse` systemd unit file that should be tailored
|
||||||
to accommodate your installation in accordance with the installation
|
to accommodate your installation in accordance with the installation
|
||||||
instructions provided in
|
instructions provided in [installation instructions](../../INSTALL.md).
|
||||||
[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).
|
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
1. Under the service section, ensure the `User` variable matches which user
|
1. Under the service section, ensure the `User` variable matches which user
|
||||||
|
|||||||
@@ -1,71 +0,0 @@
|
|||||||
[Service]
|
|
||||||
# The following directives give the synapse service R/W access to:
|
|
||||||
# - /run/matrix-synapse
|
|
||||||
# - /var/lib/matrix-synapse
|
|
||||||
# - /var/log/matrix-synapse
|
|
||||||
|
|
||||||
RuntimeDirectory=matrix-synapse
|
|
||||||
StateDirectory=matrix-synapse
|
|
||||||
LogsDirectory=matrix-synapse
|
|
||||||
|
|
||||||
######################
|
|
||||||
## Security Sandbox ##
|
|
||||||
######################
|
|
||||||
|
|
||||||
# Make sure that the service has its own unshared tmpfs at /tmp and that it
|
|
||||||
# cannot see or change any real devices
|
|
||||||
PrivateTmp=true
|
|
||||||
PrivateDevices=true
|
|
||||||
|
|
||||||
# We give no capabilities to a service by default
|
|
||||||
CapabilityBoundingSet=
|
|
||||||
AmbientCapabilities=
|
|
||||||
|
|
||||||
# Protect the following from modification:
|
|
||||||
# - The entire filesystem
|
|
||||||
# - sysctl settings and loaded kernel modules
|
|
||||||
# - No modifications allowed to Control Groups
|
|
||||||
# - Hostname
|
|
||||||
# - System Clock
|
|
||||||
ProtectSystem=strict
|
|
||||||
ProtectKernelTunables=true
|
|
||||||
ProtectKernelModules=true
|
|
||||||
ProtectControlGroups=true
|
|
||||||
ProtectClock=true
|
|
||||||
ProtectHostname=true
|
|
||||||
|
|
||||||
# Prevent access to the following:
|
|
||||||
# - /home directory
|
|
||||||
# - Kernel logs
|
|
||||||
ProtectHome=tmpfs
|
|
||||||
ProtectKernelLogs=true
|
|
||||||
|
|
||||||
# Make sure that the process can only see PIDs and process details of itself,
|
|
||||||
# and the second option disables seeing details of things like system load and
|
|
||||||
# I/O etc
|
|
||||||
ProtectProc=invisible
|
|
||||||
ProcSubset=pid
|
|
||||||
|
|
||||||
# While not needed, we set these options explicitly
|
|
||||||
# - This process has been given access to the host network
|
|
||||||
# - It can also communicate with any IP Address
|
|
||||||
PrivateNetwork=false
|
|
||||||
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
|
|
||||||
IPAddressAllow=any
|
|
||||||
|
|
||||||
# Restrict system calls to a sane bunch
|
|
||||||
SystemCallArchitectures=native
|
|
||||||
SystemCallFilter=@system-service
|
|
||||||
SystemCallFilter=~@privileged @resources @obsolete
|
|
||||||
|
|
||||||
# Misc restrictions
|
|
||||||
# - Since the process is a python process it needs to be able to write and
|
|
||||||
# execute memory regions, so we set MemoryDenyWriteExecute to false
|
|
||||||
RestrictSUIDSGID=true
|
|
||||||
RemoveIPC=true
|
|
||||||
NoNewPrivileges=true
|
|
||||||
RestrictRealtime=true
|
|
||||||
RestrictNamespaces=true
|
|
||||||
LockPersonality=true
|
|
||||||
PrivateUsers=true
|
|
||||||
MemoryDenyWriteExecute=false
|
|
||||||
31
debian/build_virtualenv
vendored
31
debian/build_virtualenv
vendored
@@ -33,13 +33,11 @@ esac
|
|||||||
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
|
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
|
||||||
# than the 2/3 compatible `virtualenv`.
|
# than the 2/3 compatible `virtualenv`.
|
||||||
|
|
||||||
# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
|
|
||||||
|
|
||||||
dh_virtualenv \
|
dh_virtualenv \
|
||||||
--install-suffix "matrix-synapse" \
|
--install-suffix "matrix-synapse" \
|
||||||
--builtin-venv \
|
--builtin-venv \
|
||||||
--python "$SNAKE" \
|
--python "$SNAKE" \
|
||||||
--upgrade-pip-to="20.3.4" \
|
--upgrade-pip \
|
||||||
--preinstall="lxml" \
|
--preinstall="lxml" \
|
||||||
--preinstall="mock" \
|
--preinstall="mock" \
|
||||||
--extra-pip-arg="--no-cache-dir" \
|
--extra-pip-arg="--no-cache-dir" \
|
||||||
@@ -50,27 +48,18 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
|
|||||||
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
|
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
|
||||||
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
|
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
|
||||||
|
|
||||||
case "$DEB_BUILD_OPTIONS" in
|
# we copy the tests to a temporary directory so that we can put them on the
|
||||||
*nocheck*)
|
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
|
||||||
# Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
|
tmpdir=`mktemp -d`
|
||||||
;;
|
trap "rm -r $tmpdir" EXIT
|
||||||
|
|
||||||
*)
|
cp -r tests "$tmpdir"
|
||||||
# Copy tests to a temporary directory so that we can put them on the
|
|
||||||
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
|
|
||||||
tmpdir=`mktemp -d`
|
|
||||||
trap "rm -r $tmpdir" EXIT
|
|
||||||
|
|
||||||
cp -r tests "$tmpdir"
|
PYTHONPATH="$tmpdir" \
|
||||||
|
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
|
||||||
PYTHONPATH="$tmpdir" \
|
|
||||||
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
|
|
||||||
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# build the config file
|
# build the config file
|
||||||
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
|
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
|
||||||
--config-dir="/etc/matrix-synapse" \
|
--config-dir="/etc/matrix-synapse" \
|
||||||
--data-dir="/var/lib/matrix-synapse" |
|
--data-dir="/var/lib/matrix-synapse" |
|
||||||
perl -pe '
|
perl -pe '
|
||||||
@@ -96,7 +85,7 @@ esac
|
|||||||
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
|
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
|
||||||
|
|
||||||
# build the log config file
|
# build the log config file
|
||||||
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
|
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
|
||||||
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
|
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
|
||||||
|
|
||||||
# add a dependency on the right version of python to substvars.
|
# add a dependency on the right version of python to substvars.
|
||||||
|
|||||||
168
debian/changelog
vendored
168
debian/changelog
vendored
@@ -1,172 +1,8 @@
|
|||||||
matrix-synapse-py3 (1.39.0) stable; urgency=medium
|
matrix-synapse-py3 (1.25.0ubuntu1) UNRELEASED; urgency=medium
|
||||||
|
|
||||||
* New synapse release 1.39.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Thu, 29 Jul 2021 09:59:00 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.39.0~rc3) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.39.0~rc3.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 28 Jul 2021 13:30:58 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.38.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.38.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Thu, 22 Jul 2021 15:37:06 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.39.0~rc1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.39.0rc1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Jul 2021 14:28:34 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.38.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.38.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Jul 2021 13:20:56 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.38.0rc3) prerelease; urgency=medium
|
|
||||||
|
|
||||||
[ Erik Johnston ]
|
|
||||||
* Add synapse_review_recent_signups script
|
|
||||||
|
|
||||||
[ Synapse Packaging team ]
|
|
||||||
* New synapse release 1.38.0rc3.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Jul 2021 11:53:56 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.37.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.37.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 30 Jun 2021 12:24:06 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.37.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.37.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Jun 2021 10:15:25 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.36.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.36.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Jun 2021 15:41:53 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.35.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.35.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Thu, 03 Jun 2021 08:11:29 -0400
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.35.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.35.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Jun 2021 13:23:35 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.34.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.34.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Mon, 17 May 2021 11:34:18 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.33.2) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.33.2.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 11 May 2021 11:17:59 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.33.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.33.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Thu, 06 May 2021 14:06:33 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.33.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.33.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 05 May 2021 14:15:27 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.32.2) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.32.2.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Apr 2021 12:43:52 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.32.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.32.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 21 Apr 2021 14:00:55 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.32.0) stable; urgency=medium
|
|
||||||
|
|
||||||
[ Dan Callahan ]
|
|
||||||
* Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
|
|
||||||
|
|
||||||
[ Synapse Packaging team ]
|
|
||||||
* New synapse release 1.32.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Apr 2021 14:28:39 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.31.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.31.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Apr 2021 13:08:29 +0100
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.30.1) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.30.1.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Fri, 26 Mar 2021 12:01:28 +0000
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.30.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.30.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Mon, 22 Mar 2021 13:15:34 +0000
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.29.0) stable; urgency=medium
|
|
||||||
|
|
||||||
[ Jonathan de Jong ]
|
|
||||||
* Remove the python -B flag (don't generate bytecode) in scripts and documentation.
|
|
||||||
|
|
||||||
[ Synapse Packaging team ]
|
|
||||||
* New synapse release 1.29.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Mon, 08 Mar 2021 13:51:50 +0000
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.28.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* New synapse release 1.28.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.27.0) stable; urgency=medium
|
|
||||||
|
|
||||||
[ Dan Callahan ]
|
|
||||||
* Fix build on Ubuntu 16.04 LTS (Xenial).
|
|
||||||
|
|
||||||
[ Synapse Packaging team ]
|
|
||||||
* New synapse release 1.27.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Feb 2021 13:11:28 +0000
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.26.0) stable; urgency=medium
|
|
||||||
|
|
||||||
[ Richard van der Hoff ]
|
|
||||||
* Remove dependency on `python3-distutils`.
|
* Remove dependency on `python3-distutils`.
|
||||||
|
|
||||||
[ Synapse Packaging team ]
|
-- Richard van der Hoff <richard@matrix.org> Fri, 15 Jan 2021 12:44:19 +0000
|
||||||
* New synapse release 1.26.0.
|
|
||||||
|
|
||||||
-- Synapse Packaging team <packages@matrix.org> Wed, 27 Jan 2021 12:43:35 -0500
|
|
||||||
|
|
||||||
matrix-synapse-py3 (1.25.0) stable; urgency=medium
|
matrix-synapse-py3 (1.25.0) stable; urgency=medium
|
||||||
|
|
||||||
|
|||||||
42
debian/hash_password.1
vendored
42
debian/hash_password.1
vendored
@@ -1,58 +1,90 @@
|
|||||||
.\" generated with Ronn-NG/v0.8.0
|
.\" generated with Ronn/v0.7.3
|
||||||
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
|
.\" http://github.com/rtomayko/ronn/tree/0.7.3
|
||||||
.TH "HASH_PASSWORD" "1" "July 2021" "" ""
|
.
|
||||||
|
.TH "HASH_PASSWORD" "1" "February 2017" "" ""
|
||||||
|
.
|
||||||
.SH "NAME"
|
.SH "NAME"
|
||||||
\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
|
\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
|
||||||
|
.
|
||||||
.SH "SYNOPSIS"
|
.SH "SYNOPSIS"
|
||||||
\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
|
\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
|
||||||
|
.
|
||||||
.SH "DESCRIPTION"
|
.SH "DESCRIPTION"
|
||||||
\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
|
\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
|
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
|
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
The hashed password is written on the \fBSTDOUT\fR\.
|
The hashed password is written on the \fBSTDOUT\fR\.
|
||||||
|
.
|
||||||
.SH "FILES"
|
.SH "FILES"
|
||||||
A sample YAML file accepted by \fBhash_password\fR is described below:
|
A sample YAML file accepted by \fBhash_password\fR is described below:
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
bcrypt_rounds: 17 password_config: pepper: "random hashing pepper"
|
bcrypt_rounds: 17 password_config: pepper: "random hashing pepper"
|
||||||
|
.
|
||||||
.SH "OPTIONS"
|
.SH "OPTIONS"
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-p\fR, \fB\-\-password\fR
|
\fB\-p\fR, \fB\-\-password\fR
|
||||||
Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
|
Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-c\fR, \fB\-\-config\fR
|
\fB\-c\fR, \fB\-\-config\fR
|
||||||
Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
|
Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
|
||||||
|
.
|
||||||
.SH "EXAMPLES"
|
.SH "EXAMPLES"
|
||||||
Hash from the command line:
|
Hash from the command line:
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
$ hash_password \-p "p@ssw0rd"
|
$ hash_password \-p "p@ssw0rd"
|
||||||
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
|
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
Hash from the STDIN:
|
Hash from the STDIN:
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
$ hash_password
|
$ hash_password
|
||||||
Password:
|
Password:
|
||||||
Confirm password:
|
Confirm password:
|
||||||
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
|
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
Using a config file:
|
Using a config file:
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
$ hash_password \-c config\.yml
|
$ hash_password \-c config\.yml
|
||||||
Password:
|
Password:
|
||||||
Confirm password:
|
Confirm password:
|
||||||
$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
|
$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.SH "COPYRIGHT"
|
.SH "COPYRIGHT"
|
||||||
This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
|
This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
|
||||||
|
.
|
||||||
.SH "SEE ALSO"
|
.SH "SEE ALSO"
|
||||||
synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synctl(1), synapse_port_db(1), register_new_matrix_user(1)
|
||||||
|
|||||||
2
debian/hash_password.ronn
vendored
2
debian/hash_password.ronn
vendored
@@ -66,4 +66,4 @@ for Debian GNU/Linux distribution.
|
|||||||
|
|
||||||
## SEE ALSO
|
## SEE ALSO
|
||||||
|
|
||||||
synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synctl(1), synapse_port_db(1), register_new_matrix_user(1)
|
||||||
|
|||||||
1
debian/manpages
vendored
1
debian/manpages
vendored
@@ -1,5 +1,4 @@
|
|||||||
debian/hash_password.1
|
debian/hash_password.1
|
||||||
debian/register_new_matrix_user.1
|
debian/register_new_matrix_user.1
|
||||||
debian/synapse_port_db.1
|
debian/synapse_port_db.1
|
||||||
debian/synapse_review_recent_signups.1
|
|
||||||
debian/synctl.1
|
debian/synctl.1
|
||||||
|
|||||||
1
debian/matrix-synapse-py3.links
vendored
1
debian/matrix-synapse-py3.links
vendored
@@ -1,5 +1,4 @@
|
|||||||
opt/venvs/matrix-synapse/bin/hash_password usr/bin/hash_password
|
opt/venvs/matrix-synapse/bin/hash_password usr/bin/hash_password
|
||||||
opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matrix_user
|
opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matrix_user
|
||||||
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
|
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
|
||||||
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
|
|
||||||
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
|
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
|
||||||
|
|||||||
37
debian/register_new_matrix_user.1
vendored
37
debian/register_new_matrix_user.1
vendored
@@ -1,47 +1,72 @@
|
|||||||
.\" generated with Ronn-NG/v0.8.0
|
.\" generated with Ronn/v0.7.3
|
||||||
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
|
.\" http://github.com/rtomayko/ronn/tree/0.7.3
|
||||||
.TH "REGISTER_NEW_MATRIX_USER" "1" "July 2021" "" ""
|
.
|
||||||
|
.TH "REGISTER_NEW_MATRIX_USER" "1" "February 2017" "" ""
|
||||||
|
.
|
||||||
.SH "NAME"
|
.SH "NAME"
|
||||||
\fBregister_new_matrix_user\fR \- Used to register new users with a given home server when registration has been disabled
|
\fBregister_new_matrix_user\fR \- Used to register new users with a given home server when registration has been disabled
|
||||||
|
.
|
||||||
.SH "SYNOPSIS"
|
.SH "SYNOPSIS"
|
||||||
\fBregister_new_matrix_user\fR options\|\.\|\.\|\.
|
\fBregister_new_matrix_user\fR options\.\.\.
|
||||||
|
.
|
||||||
.SH "DESCRIPTION"
|
.SH "DESCRIPTION"
|
||||||
\fBregister_new_matrix_user\fR registers new users with a given home server when registration has been disabled\. For this to work, the home server must be configured with the \'registration_shared_secret\' option set\.
|
\fBregister_new_matrix_user\fR registers new users with a given home server when registration has been disabled\. For this to work, the home server must be configured with the \'registration_shared_secret\' option set\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
This accepts the user credentials like the username, password, is user an admin or not and registers the user onto the homeserver database\. Also, a YAML file containing the shared secret can be provided\. If not, the shared secret can be provided via the command line\.
|
This accepts the user credentials like the username, password, is user an admin or not and registers the user onto the homeserver database\. Also, a YAML file containing the shared secret can be provided\. If not, the shared secret can be provided via the command line\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
By default it assumes the home server URL to be \fBhttps://localhost:8448\fR\. This can be changed via the \fBserver_url\fR command line option\.
|
By default it assumes the home server URL to be \fBhttps://localhost:8448\fR\. This can be changed via the \fBserver_url\fR command line option\.
|
||||||
|
.
|
||||||
.SH "FILES"
|
.SH "FILES"
|
||||||
A sample YAML file accepted by \fBregister_new_matrix_user\fR is described below:
|
A sample YAML file accepted by \fBregister_new_matrix_user\fR is described below:
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
registration_shared_secret: "s3cr3t"
|
registration_shared_secret: "s3cr3t"
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.SH "OPTIONS"
|
.SH "OPTIONS"
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-u\fR, \fB\-\-user\fR
|
\fB\-u\fR, \fB\-\-user\fR
|
||||||
Local part of the new user\. Will prompt if omitted\.
|
Local part of the new user\. Will prompt if omitted\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-p\fR, \fB\-\-password\fR
|
\fB\-p\fR, \fB\-\-password\fR
|
||||||
New password for user\. Will prompt if omitted\. Supplying the password on the command line is not recommended\. Use the STDIN instead\.
|
New password for user\. Will prompt if omitted\. Supplying the password on the command line is not recommended\. Use the STDIN instead\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-a\fR, \fB\-\-admin\fR
|
\fB\-a\fR, \fB\-\-admin\fR
|
||||||
Register new user as an admin\. Will prompt if omitted\.
|
Register new user as an admin\. Will prompt if omitted\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-c\fR, \fB\-\-config\fR
|
\fB\-c\fR, \fB\-\-config\fR
|
||||||
Path to server config file containing the shared secret\.
|
Path to server config file containing the shared secret\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-k\fR, \fB\-\-shared\-secret\fR
|
\fB\-k\fR, \fB\-\-shared\-secret\fR
|
||||||
Shared secret as defined in server config file\. This is an optional parameter as it can be also supplied via the YAML file\.
|
Shared secret as defined in server config file\. This is an optional parameter as it can be also supplied via the YAML file\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fBserver_url\fR
|
\fBserver_url\fR
|
||||||
URL of the home server\. Defaults to \'https://localhost:8448\'\.
|
URL of the home server\. Defaults to \'https://localhost:8448\'\.
|
||||||
|
.
|
||||||
.SH "EXAMPLES"
|
.SH "EXAMPLES"
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
$ register_new_matrix_user \-u user1 \-p p@ssword \-a \-c config\.yaml
|
$ register_new_matrix_user \-u user1 \-p p@ssword \-a \-c config\.yaml
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.SH "COPYRIGHT"
|
.SH "COPYRIGHT"
|
||||||
This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
|
This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
|
||||||
|
.
|
||||||
.SH "SEE ALSO"
|
.SH "SEE ALSO"
|
||||||
synctl(1), synapse_port_db(1), hash_password(1), synapse_review_recent_signups(1)
|
synctl(1), synapse_port_db(1), hash_password(1)
|
||||||
|
|||||||
2
debian/register_new_matrix_user.ronn
vendored
2
debian/register_new_matrix_user.ronn
vendored
@@ -58,4 +58,4 @@ for Debian GNU/Linux distribution.
|
|||||||
|
|
||||||
## SEE ALSO
|
## SEE ALSO
|
||||||
|
|
||||||
synctl(1), synapse_port_db(1), hash_password(1), synapse_review_recent_signups(1)
|
synctl(1), synapse_port_db(1), hash_password(1)
|
||||||
|
|||||||
59
debian/synapse_port_db.1
vendored
59
debian/synapse_port_db.1
vendored
@@ -1,56 +1,83 @@
|
|||||||
.\" generated with Ronn-NG/v0.8.0
|
.\" generated with Ronn/v0.7.3
|
||||||
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
|
.\" http://github.com/rtomayko/ronn/tree/0.7.3
|
||||||
.TH "SYNAPSE_PORT_DB" "1" "July 2021" "" ""
|
.
|
||||||
|
.TH "SYNAPSE_PORT_DB" "1" "February 2017" "" ""
|
||||||
|
.
|
||||||
.SH "NAME"
|
.SH "NAME"
|
||||||
\fBsynapse_port_db\fR \- A script to port an existing synapse SQLite database to a new PostgreSQL database\.
|
\fBsynapse_port_db\fR \- A script to port an existing synapse SQLite database to a new PostgreSQL database\.
|
||||||
|
.
|
||||||
.SH "SYNOPSIS"
|
.SH "SYNOPSIS"
|
||||||
\fBsynapse_port_db\fR [\-v] \-\-sqlite\-database=\fIdbfile\fR \-\-postgres\-config=\fIyamlconfig\fR [\-\-curses] [\-\-batch\-size=\fIbatch\-size\fR]
|
\fBsynapse_port_db\fR [\-v] \-\-sqlite\-database=\fIdbfile\fR \-\-postgres\-config=\fIyamlconfig\fR [\-\-curses] [\-\-batch\-size=\fIbatch\-size\fR]
|
||||||
|
.
|
||||||
.SH "DESCRIPTION"
|
.SH "DESCRIPTION"
|
||||||
\fBsynapse_port_db\fR ports an existing synapse SQLite database to a new PostgreSQL database\.
|
\fBsynapse_port_db\fR ports an existing synapse SQLite database to a new PostgreSQL database\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
SQLite database is specified with \fB\-\-sqlite\-database\fR option and PostgreSQL configuration required to connect to PostgreSQL database is provided using \fB\-\-postgres\-config\fR configuration\. The configuration is specified in YAML format\.
|
SQLite database is specified with \fB\-\-sqlite\-database\fR option and PostgreSQL configuration required to connect to PostgreSQL database is provided using \fB\-\-postgres\-config\fR configuration\. The configuration is specified in YAML format\.
|
||||||
|
.
|
||||||
.SH "OPTIONS"
|
.SH "OPTIONS"
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-v\fR
|
\fB\-v\fR
|
||||||
Print log messages in \fBdebug\fR level instead of \fBinfo\fR level\.
|
Print log messages in \fBdebug\fR level instead of \fBinfo\fR level\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-\-sqlite\-database\fR
|
\fB\-\-sqlite\-database\fR
|
||||||
The snapshot of the SQLite database file\. This must not be currently used by a running synapse server\.
|
The snapshot of the SQLite database file\. This must not be currently used by a running synapse server\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-\-postgres\-config\fR
|
\fB\-\-postgres\-config\fR
|
||||||
The database config file for the PostgreSQL database\.
|
The database config file for the PostgreSQL database\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-\-curses\fR
|
\fB\-\-curses\fR
|
||||||
Display a curses based progress UI\.
|
Display a curses based progress UI\.
|
||||||
|
.
|
||||||
.SH "CONFIG FILE"
|
.SH "CONFIG FILE"
|
||||||
The postgres configuration file must be a valid YAML file with the following options\.
|
The postgres configuration file must be a valid YAML file with the following options\.
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBdatabase\fR: Database configuration section\. This section header can be ignored and the options below may be specified as top level keys\.
|
\fBdatabase\fR: Database configuration section\. This section header can be ignored and the options below may be specified as top level keys\.
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBname\fR: Connector to use when connecting to the database\. This value must be \fBpsycopg2\fR\.
|
\fBname\fR: Connector to use when connecting to the database\. This value must be \fBpsycopg2\fR\.
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBargs\fR: DB API 2\.0 compatible arguments to send to the \fBpsycopg2\fR module\.
|
\fBargs\fR: DB API 2\.0 compatible arguments to send to the \fBpsycopg2\fR module\.
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBdbname\fR \- the database name
|
\fBdbname\fR \- the database name
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBuser\fR \- user name used to authenticate
|
\fBuser\fR \- user name used to authenticate
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBpassword\fR \- password used to authenticate
|
\fBpassword\fR \- password used to authenticate
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBhost\fR \- database host address (defaults to UNIX socket if not provided)
|
\fBhost\fR \- database host address (defaults to UNIX socket if not provided)
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBport\fR \- connection port number (defaults to 5432 if not provided)
|
\fBport\fR \- connection port number (defaults to 5432 if not provided)
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
|
||||||
.IP "\[ci]" 4
|
.
|
||||||
|
.IP "\(bu" 4
|
||||||
\fBsynchronous_commit\fR: Optional\. Default is True\. If the value is \fBFalse\fR, enable asynchronous commit and don\'t wait for the server to call fsync before ending the transaction\. See: https://www\.postgresql\.org/docs/current/static/wal\-async\-commit\.html
|
\fBsynchronous_commit\fR: Optional\. Default is True\. If the value is \fBFalse\fR, enable asynchronous commit and don\'t wait for the server to call fsync before ending the transaction\. See: https://www\.postgresql\.org/docs/current/static/wal\-async\-commit\.html
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
Following example illustrates the configuration file format\.
|
Following example illustrates the configuration file format\.
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
|
|
||||||
database:
|
database:
|
||||||
name: psycopg2
|
name: psycopg2
|
||||||
args:
|
args:
|
||||||
@@ -59,9 +86,13 @@ database:
|
|||||||
password: ORohmi9Eet=ohphi
|
password: ORohmi9Eet=ohphi
|
||||||
host: localhost
|
host: localhost
|
||||||
synchronous_commit: false
|
synchronous_commit: false
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.SH "COPYRIGHT"
|
.SH "COPYRIGHT"
|
||||||
This man page was written by Sunil Mohan Adapa <\fI\%mailto:sunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
|
This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
|
||||||
|
.
|
||||||
.SH "SEE ALSO"
|
.SH "SEE ALSO"
|
||||||
synctl(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synctl(1), hash_password(1), register_new_matrix_user(1)
|
||||||
|
|||||||
8
debian/synapse_port_db.ronn
vendored
8
debian/synapse_port_db.ronn
vendored
@@ -47,7 +47,7 @@ following options.
|
|||||||
* `args`:
|
* `args`:
|
||||||
DB API 2.0 compatible arguments to send to the `psycopg2` module.
|
DB API 2.0 compatible arguments to send to the `psycopg2` module.
|
||||||
|
|
||||||
* `dbname` - the database name
|
* `dbname` - the database name
|
||||||
|
|
||||||
* `user` - user name used to authenticate
|
* `user` - user name used to authenticate
|
||||||
|
|
||||||
@@ -58,7 +58,7 @@ following options.
|
|||||||
|
|
||||||
* `port` - connection port number (defaults to 5432 if not
|
* `port` - connection port number (defaults to 5432 if not
|
||||||
provided)
|
provided)
|
||||||
|
|
||||||
|
|
||||||
* `synchronous_commit`:
|
* `synchronous_commit`:
|
||||||
Optional. Default is True. If the value is `False`, enable
|
Optional. Default is True. If the value is `False`, enable
|
||||||
@@ -76,7 +76,7 @@ Following example illustrates the configuration file format.
|
|||||||
password: ORohmi9Eet=ohphi
|
password: ORohmi9Eet=ohphi
|
||||||
host: localhost
|
host: localhost
|
||||||
synchronous_commit: false
|
synchronous_commit: false
|
||||||
|
|
||||||
## COPYRIGHT
|
## COPYRIGHT
|
||||||
|
|
||||||
This man page was written by Sunil Mohan Adapa <<sunil@medhas.org>> for
|
This man page was written by Sunil Mohan Adapa <<sunil@medhas.org>> for
|
||||||
@@ -84,4 +84,4 @@ Debian GNU/Linux distribution.
|
|||||||
|
|
||||||
## SEE ALSO
|
## SEE ALSO
|
||||||
|
|
||||||
synctl(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synctl(1), hash_password(1), register_new_matrix_user(1)
|
||||||
|
|||||||
26
debian/synapse_review_recent_signups.1
vendored
26
debian/synapse_review_recent_signups.1
vendored
@@ -1,26 +0,0 @@
|
|||||||
.\" generated with Ronn-NG/v0.8.0
|
|
||||||
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
|
|
||||||
.TH "SYNAPSE_REVIEW_RECENT_SIGNUPS" "1" "July 2021" "" ""
|
|
||||||
.SH "NAME"
|
|
||||||
\fBsynapse_review_recent_signups\fR \- Print users that have recently registered on Synapse
|
|
||||||
.SH "SYNOPSIS"
|
|
||||||
\fBsynapse_review_recent_signups\fR \fB\-c\fR|\fB\-\-config\fR \fIfile\fR [\fB\-s\fR|\fB\-\-since\fR \fIperiod\fR] [\fB\-e\fR|\fB\-\-exclude\-emails\fR] [\fB\-u\fR|\fB\-\-only\-users\fR]
|
|
||||||
.SH "DESCRIPTION"
|
|
||||||
\fBsynapse_review_recent_signups\fR prints out recently registered users on a Synapse server, as well as some basic information about the user\.
|
|
||||||
.P
|
|
||||||
\fBsynapse_review_recent_signups\fR must be supplied with the config of the Synapse server, so that it can fetch the database config and connect to the database\.
|
|
||||||
.SH "OPTIONS"
|
|
||||||
.TP
|
|
||||||
\fB\-c\fR, \fB\-\-config\fR
|
|
||||||
The config file(s) used by the Synapse server\.
|
|
||||||
.TP
|
|
||||||
\fB\-s\fR, \fB\-\-since\fR
|
|
||||||
How far back to search for newly registered users\. Defaults to 7d, i\.e\. up to seven days in the past\. Valid units are \'s\', \'m\', \'h\', \'d\', \'w\', or \'y\'\.
|
|
||||||
.TP
|
|
||||||
\fB\-e\fR, \fB\-\-exclude\-emails\fR
|
|
||||||
Do not print out users that have validated emails associated with their account\.
|
|
||||||
.TP
|
|
||||||
\fB\-u\fR, \fB\-\-only\-users\fR
|
|
||||||
Only print out the user IDs of recently registered users, without any additional information
|
|
||||||
.SH "SEE ALSO"
|
|
||||||
synctl(1), synapse_port_db(1), register_new_matrix_user(1), hash_password(1)
|
|
||||||
37
debian/synapse_review_recent_signups.ronn
vendored
37
debian/synapse_review_recent_signups.ronn
vendored
@@ -1,37 +0,0 @@
|
|||||||
synapse_review_recent_signups(1) -- Print users that have recently registered on Synapse
|
|
||||||
========================================================================================
|
|
||||||
|
|
||||||
## SYNOPSIS
|
|
||||||
|
|
||||||
`synapse_review_recent_signups` `-c`|`--config` <file> [`-s`|`--since` <period>] [`-e`|`--exclude-emails`] [`-u`|`--only-users`]
|
|
||||||
|
|
||||||
## DESCRIPTION
|
|
||||||
|
|
||||||
**synapse_review_recent_signups** prints out recently registered users on a
|
|
||||||
Synapse server, as well as some basic information about the user.
|
|
||||||
|
|
||||||
`synapse_review_recent_signups` must be supplied with the config of the Synapse
|
|
||||||
server, so that it can fetch the database config and connect to the database.
|
|
||||||
|
|
||||||
|
|
||||||
## OPTIONS
|
|
||||||
|
|
||||||
* `-c`, `--config`:
|
|
||||||
The config file(s) used by the Synapse server.
|
|
||||||
|
|
||||||
* `-s`, `--since`:
|
|
||||||
How far back to search for newly registered users. Defaults to 7d, i.e. up
|
|
||||||
to seven days in the past. Valid units are 's', 'm', 'h', 'd', 'w', or 'y'.
|
|
||||||
|
|
||||||
* `-e`, `--exclude-emails`:
|
|
||||||
Do not print out users that have validated emails associated with their
|
|
||||||
account.
|
|
||||||
|
|
||||||
* `-u`, `--only-users`:
|
|
||||||
Only print out the user IDs of recently registered users, without any
|
|
||||||
additional information
|
|
||||||
|
|
||||||
|
|
||||||
## SEE ALSO
|
|
||||||
|
|
||||||
synctl(1), synapse_port_db(1), register_new_matrix_user(1), hash_password(1)
|
|
||||||
44
debian/synctl.1
vendored
44
debian/synctl.1
vendored
@@ -1,41 +1,63 @@
|
|||||||
.\" generated with Ronn-NG/v0.8.0
|
.\" generated with Ronn/v0.7.3
|
||||||
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
|
.\" http://github.com/rtomayko/ronn/tree/0.7.3
|
||||||
.TH "SYNCTL" "1" "July 2021" "" ""
|
.
|
||||||
|
.TH "SYNCTL" "1" "February 2017" "" ""
|
||||||
|
.
|
||||||
.SH "NAME"
|
.SH "NAME"
|
||||||
\fBsynctl\fR \- Synapse server control interface
|
\fBsynctl\fR \- Synapse server control interface
|
||||||
|
.
|
||||||
.SH "SYNOPSIS"
|
.SH "SYNOPSIS"
|
||||||
Start, stop or restart synapse server\.
|
Start, stop or restart synapse server\.
|
||||||
|
.
|
||||||
.P
|
.P
|
||||||
\fBsynctl\fR {start|stop|restart} [configfile] [\-w|\-\-worker=\fIWORKERCONFIG\fR] [\-a|\-\-all\-processes=\fIWORKERCONFIGDIR\fR]
|
\fBsynctl\fR {start|stop|restart} [configfile] [\-w|\-\-worker=\fIWORKERCONFIG\fR] [\-a|\-\-all\-processes=\fIWORKERCONFIGDIR\fR]
|
||||||
|
.
|
||||||
.SH "DESCRIPTION"
|
.SH "DESCRIPTION"
|
||||||
\fBsynctl\fR can be used to start, stop or restart Synapse server\. The control operation can be done on all processes or a single worker process\.
|
\fBsynctl\fR can be used to start, stop or restart Synapse server\. The control operation can be done on all processes or a single worker process\.
|
||||||
|
.
|
||||||
.SH "OPTIONS"
|
.SH "OPTIONS"
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fBaction\fR
|
\fBaction\fR
|
||||||
The value of action should be one of \fBstart\fR, \fBstop\fR or \fBrestart\fR\.
|
The value of action should be one of \fBstart\fR, \fBstop\fR or \fBrestart\fR\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fBconfigfile\fR
|
\fBconfigfile\fR
|
||||||
Optional path of the configuration file to use\. Default value is \fBhomeserver\.yaml\fR\. The configuration file must exist for the operation to succeed\.
|
Optional path of the configuration file to use\. Default value is \fBhomeserver\.yaml\fR\. The configuration file must exist for the operation to succeed\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-w\fR, \fB\-\-worker\fR:
|
\fB\-w\fR, \fB\-\-worker\fR:
|
||||||
|
.
|
||||||
|
.IP
|
||||||
|
Perform start, stop or restart operations on a single worker\. Incompatible with \fB\-a\fR|\fB\-\-all\-processes\fR\. Value passed must be a valid worker\'s configuration file\.
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fB\-a\fR, \fB\-\-all\-processes\fR:
|
\fB\-a\fR, \fB\-\-all\-processes\fR:
|
||||||
|
.
|
||||||
|
.IP
|
||||||
|
Perform start, stop or restart operations on all the workers in the given directory and the main synapse process\. Incompatible with \fB\-w\fR|\fB\-\-worker\fR\. Value passed must be a directory containing valid work configuration files\. All files ending with \fB\.yaml\fR extension shall be considered as configuration files and all other files in the directory are ignored\.
|
||||||
|
.
|
||||||
.SH "CONFIGURATION FILE"
|
.SH "CONFIGURATION FILE"
|
||||||
Configuration file may be generated as follows:
|
Configuration file may be generated as follows:
|
||||||
|
.
|
||||||
.IP "" 4
|
.IP "" 4
|
||||||
|
.
|
||||||
.nf
|
.nf
|
||||||
$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
|
|
||||||
|
$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
|
||||||
|
.
|
||||||
.fi
|
.fi
|
||||||
|
.
|
||||||
.IP "" 0
|
.IP "" 0
|
||||||
|
.
|
||||||
.SH "ENVIRONMENT"
|
.SH "ENVIRONMENT"
|
||||||
|
.
|
||||||
.TP
|
.TP
|
||||||
\fBSYNAPSE_CACHE_FACTOR\fR
|
\fBSYNAPSE_CACHE_FACTOR\fR
|
||||||
Synapse\'s architecture is quite RAM hungry currently \- we deliberately cache a lot of recent room data and metadata in RAM in order to speed up common requests\. We\'ll improve this in the future, but for now the easiest way to either reduce the RAM usage (at the risk of slowing things down) is to set the almost\-undocumented \fBSYNAPSE_CACHE_FACTOR\fR environment variable\. The default is 0\.5, which can be decreased to reduce RAM usage in memory constrained enviroments, or increased if performance starts to degrade\.
|
Synapse\'s architecture is quite RAM hungry currently \- a lot of recent room data and metadata is deliberately cached in RAM in order to speed up common requests\. This will be improved in future, but for now the easiest way to either reduce the RAM usage (at the risk of slowing things down) is to set the SYNAPSE_CACHE_FACTOR environment variable\. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1\.0 will max out at around 3\-4GB of resident memory \- this is what we currently run the matrix\.org on\. The default setting is currently 0\.1, which is probably around a ~700MB footprint\. You can dial it down further to 0\.02 if desired, which targets roughly ~512MB\. Conversely you can dial it up if you need performance for lots of users and have a box with a lot of RAM\.
|
||||||
.IP
|
.
|
||||||
However, degraded performance due to a low cache factor, common on machines with slow disks, often leads to explosions in memory use due backlogged requests\. In this case, reducing the cache factor will make things worse\. Instead, try increasing it drastically\. 2\.0 is a good starting value\.
|
|
||||||
.SH "COPYRIGHT"
|
.SH "COPYRIGHT"
|
||||||
This man page was written by Sunil Mohan Adapa <\fI\%mailto:sunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
|
This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
|
||||||
|
.
|
||||||
.SH "SEE ALSO"
|
.SH "SEE ALSO"
|
||||||
synapse_port_db(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synapse_port_db(1), hash_password(1), register_new_matrix_user(1)
|
||||||
|
|||||||
4
debian/synctl.ronn
vendored
4
debian/synctl.ronn
vendored
@@ -41,7 +41,7 @@ process.
|
|||||||
|
|
||||||
Configuration file may be generated as follows:
|
Configuration file may be generated as follows:
|
||||||
|
|
||||||
$ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
|
$ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
|
||||||
|
|
||||||
## ENVIRONMENT
|
## ENVIRONMENT
|
||||||
|
|
||||||
@@ -68,4 +68,4 @@ Debian GNU/Linux distribution.
|
|||||||
|
|
||||||
## SEE ALSO
|
## SEE ALSO
|
||||||
|
|
||||||
synapse_port_db(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
|
synapse_port_db(1), hash_password(1), register_new_matrix_user(1)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
||||||
|
|
||||||
@@ -96,48 +96,18 @@ for port in 8080 8081 8082; do
|
|||||||
# Check script parameters
|
# Check script parameters
|
||||||
if [ $# -eq 1 ]; then
|
if [ $# -eq 1 ]; then
|
||||||
if [ $1 = "--no-rate-limit" ]; then
|
if [ $1 = "--no-rate-limit" ]; then
|
||||||
|
# messages rate limit
|
||||||
|
echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
|
||||||
|
echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
|
||||||
|
|
||||||
# Disable any rate limiting
|
# registration rate limit
|
||||||
ratelimiting=$(cat <<-RC
|
printf 'rc_registration:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||||
rc_message:
|
|
||||||
per_second: 1000
|
# login rate limit
|
||||||
burst_count: 1000
|
echo 'rc_login:' >> $DIR/etc/$port.config
|
||||||
rc_registration:
|
printf ' address:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||||
per_second: 1000
|
printf ' account:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||||
burst_count: 1000
|
printf ' failed_attempts:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||||
rc_login:
|
|
||||||
address:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
account:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
failed_attempts:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
rc_admin_redaction:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
rc_joins:
|
|
||||||
local:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
remote:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
rc_3pid_validation:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
rc_invites:
|
|
||||||
per_room:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
per_user:
|
|
||||||
per_second: 1000
|
|
||||||
burst_count: 1000
|
|
||||||
RC
|
|
||||||
)
|
|
||||||
echo "${ratelimiting}" >> $DIR/etc/$port.config
|
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
||||||
|
|
||||||
|
|||||||
@@ -28,32 +28,31 @@ RUN apt-get update && apt-get install -y \
|
|||||||
libwebp-dev \
|
libwebp-dev \
|
||||||
libxml++2.6-dev \
|
libxml++2.6-dev \
|
||||||
libxslt1-dev \
|
libxslt1-dev \
|
||||||
openssl \
|
|
||||||
rustc \
|
|
||||||
zlib1g-dev \
|
zlib1g-dev \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Copy just what we need to pip install
|
# Build dependencies that are not available as wheels, to speed up rebuilds
|
||||||
|
RUN pip install --prefix="/install" --no-warn-script-location \
|
||||||
|
frozendict \
|
||||||
|
jaeger-client \
|
||||||
|
opentracing \
|
||||||
|
# Match the version constraints of Synapse
|
||||||
|
"prometheus_client>=0.4.0" \
|
||||||
|
psycopg2 \
|
||||||
|
pycparser \
|
||||||
|
pyrsistent \
|
||||||
|
pyyaml \
|
||||||
|
simplejson \
|
||||||
|
threadloop \
|
||||||
|
thrift
|
||||||
|
|
||||||
|
# now install synapse and all of the python deps to /install.
|
||||||
|
COPY synapse /synapse/synapse/
|
||||||
COPY scripts /synapse/scripts/
|
COPY scripts /synapse/scripts/
|
||||||
COPY MANIFEST.in README.rst setup.py synctl /synapse/
|
COPY MANIFEST.in README.rst setup.py synctl /synapse/
|
||||||
COPY synapse/__init__.py /synapse/synapse/__init__.py
|
|
||||||
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
|
|
||||||
|
|
||||||
# To speed up rebuilds, install all of the dependencies before we copy over
|
|
||||||
# the whole synapse project so that we this layer in the Docker cache can be
|
|
||||||
# used while you develop on the source
|
|
||||||
#
|
|
||||||
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
|
|
||||||
RUN pip install --prefix="/install" --no-warn-script-location \
|
RUN pip install --prefix="/install" --no-warn-script-location \
|
||||||
/synapse[all]
|
/synapse[all]
|
||||||
|
|
||||||
# Copy over the rest of the project
|
|
||||||
COPY synapse /synapse/synapse/
|
|
||||||
|
|
||||||
# Install the synapse package itself and all of its children packages.
|
|
||||||
#
|
|
||||||
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py
|
|
||||||
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
|
|
||||||
|
|
||||||
###
|
###
|
||||||
### Stage 1: runtime
|
### Stage 1: runtime
|
||||||
@@ -61,11 +60,6 @@ RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
|
|||||||
|
|
||||||
FROM docker.io/python:${PYTHON_VERSION}-slim
|
FROM docker.io/python:${PYTHON_VERSION}-slim
|
||||||
|
|
||||||
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
|
|
||||||
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
|
|
||||||
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
|
|
||||||
LABEL org.opencontainers.image.licenses='Apache-2.0'
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y \
|
RUN apt-get update && apt-get install -y \
|
||||||
curl \
|
curl \
|
||||||
gosu \
|
gosu \
|
||||||
@@ -73,10 +67,7 @@ RUN apt-get update && apt-get install -y \
|
|||||||
libpq5 \
|
libpq5 \
|
||||||
libwebp6 \
|
libwebp6 \
|
||||||
xmlsec1 \
|
xmlsec1 \
|
||||||
libjemalloc2 \
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
libssl-dev \
|
|
||||||
openssl \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
COPY --from=builder /install /usr/local
|
COPY --from=builder /install /usr/local
|
||||||
COPY ./docker/start.py /start.py
|
COPY ./docker/start.py /start.py
|
||||||
@@ -88,5 +79,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
|||||||
|
|
||||||
ENTRYPOINT ["/start.py"]
|
ENTRYPOINT ["/start.py"]
|
||||||
|
|
||||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
HEALTHCHECK --interval=1m --timeout=5s \
|
||||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||||
|
|||||||
@@ -27,7 +27,6 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
|
|||||||
wget
|
wget
|
||||||
|
|
||||||
# fetch and unpack the package
|
# fetch and unpack the package
|
||||||
# TODO: Upgrade to 1.2.2 once xenial is dropped
|
|
||||||
RUN mkdir /dh-virtualenv
|
RUN mkdir /dh-virtualenv
|
||||||
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
|
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
|
||||||
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
|
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
|
||||||
|
|||||||
@@ -1,23 +0,0 @@
|
|||||||
# Inherit from the official Synapse docker image
|
|
||||||
FROM matrixdotorg/synapse
|
|
||||||
|
|
||||||
# Install deps
|
|
||||||
RUN apt-get update
|
|
||||||
RUN apt-get install -y supervisor redis nginx
|
|
||||||
|
|
||||||
# Remove the default nginx sites
|
|
||||||
RUN rm /etc/nginx/sites-enabled/default
|
|
||||||
|
|
||||||
# Copy Synapse worker, nginx and supervisord configuration template files
|
|
||||||
COPY ./docker/conf-workers/* /conf/
|
|
||||||
|
|
||||||
# Expose nginx listener port
|
|
||||||
EXPOSE 8080/tcp
|
|
||||||
|
|
||||||
# Volume for user-editable config files, logs etc.
|
|
||||||
VOLUME ["/data"]
|
|
||||||
|
|
||||||
# A script to read environment variables and create the necessary
|
|
||||||
# files to run the desired worker configuration. Will start supervisord.
|
|
||||||
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
|
|
||||||
ENTRYPOINT ["/configure_workers_and_start.py"]
|
|
||||||
@@ -1,140 +0,0 @@
|
|||||||
# Running tests against a dockerised Synapse
|
|
||||||
|
|
||||||
It's possible to run integration tests against Synapse
|
|
||||||
using [Complement](https://github.com/matrix-org/complement). Complement is a Matrix Spec
|
|
||||||
compliance test suite for homeservers, and supports any homeserver docker image configured
|
|
||||||
to listen on ports 8008/8448. This document contains instructions for building Synapse
|
|
||||||
docker images that can be run inside Complement for testing purposes.
|
|
||||||
|
|
||||||
Note that running Synapse's unit tests from within the docker image is not supported.
|
|
||||||
|
|
||||||
## Testing with SQLite and single-process Synapse
|
|
||||||
|
|
||||||
> Note that `scripts-dev/complement.sh` is a script that will automatically build
|
|
||||||
> and run an SQLite-based, single-process of Synapse against Complement.
|
|
||||||
|
|
||||||
The instructions below will set up Complement testing for a single-process,
|
|
||||||
SQLite-based Synapse deployment.
|
|
||||||
|
|
||||||
Start by building the base Synapse docker image. If you wish to run tests with the latest
|
|
||||||
release of Synapse, instead of your current checkout, you can skip this step. From the
|
|
||||||
root of the repository:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
|
||||||
```
|
|
||||||
|
|
||||||
This will build an image with the tag `matrixdotorg/synapse`.
|
|
||||||
|
|
||||||
Next, build the Synapse image for Complement. You will need a local checkout
|
|
||||||
of Complement. Change to the root of your Complement checkout and run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
|
|
||||||
```
|
|
||||||
|
|
||||||
This will build an image with the tag `complement-synapse`, which can be handed to
|
|
||||||
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
|
|
||||||
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
|
|
||||||
how to run the tests, as well as the various available command line flags.
|
|
||||||
|
|
||||||
## Testing with PostgreSQL and single or multi-process Synapse
|
|
||||||
|
|
||||||
The above docker image only supports running Synapse with SQLite and in a
|
|
||||||
single-process topology. The following instructions are used to build a Synapse image for
|
|
||||||
Complement that supports either single or multi-process topology with a PostgreSQL
|
|
||||||
database backend.
|
|
||||||
|
|
||||||
As with the single-process image, build the base Synapse docker image. If you wish to run
|
|
||||||
tests with the latest release of Synapse, instead of your current checkout, you can skip
|
|
||||||
this step. From the root of the repository:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
|
||||||
```
|
|
||||||
|
|
||||||
This will build an image with the tag `matrixdotorg/synapse`.
|
|
||||||
|
|
||||||
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
|
|
||||||
Again, from the root of the repository:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
|
|
||||||
```
|
|
||||||
|
|
||||||
This will build an image with the tag` matrixdotorg/synapse-workers`.
|
|
||||||
|
|
||||||
It's worth noting at this point that this image is fully functional, and
|
|
||||||
can be used for testing against locally. See instructions for using the container
|
|
||||||
under
|
|
||||||
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
|
|
||||||
below.
|
|
||||||
|
|
||||||
Finally, build the Synapse image for Complement, which is based on
|
|
||||||
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
|
|
||||||
the root of your Complement checkout and run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
|
|
||||||
```
|
|
||||||
|
|
||||||
This will build an image with the tag `complement-synapse`, which can be handed to
|
|
||||||
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
|
|
||||||
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
|
|
||||||
how to run the tests, as well as the various available command line flags.
|
|
||||||
|
|
||||||
## Running the Dockerfile-worker image standalone
|
|
||||||
|
|
||||||
For manual testing of a multi-process Synapse instance in Docker,
|
|
||||||
[Dockerfile-workers](Dockerfile-workers) is a Dockerfile that will produce an image
|
|
||||||
bundling all necessary components together for a workerised homeserver instance.
|
|
||||||
|
|
||||||
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
|
|
||||||
a redis for worker communication and a supervisord instance to start up and monitor all
|
|
||||||
processes. You will need to provide your own postgres container to connect to, and TLS
|
|
||||||
is not handled by the container.
|
|
||||||
|
|
||||||
Once you've built the image using the above instructions, you can run it. Be sure
|
|
||||||
you've set up a volume according to the [usual Synapse docker instructions](README.md).
|
|
||||||
Then run something along the lines of:
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run -d --name synapse \
|
|
||||||
--mount type=volume,src=synapse-data,dst=/data \
|
|
||||||
-p 8008:8008 \
|
|
||||||
-e SYNAPSE_SERVER_NAME=my.matrix.host \
|
|
||||||
-e SYNAPSE_REPORT_STATS=no \
|
|
||||||
-e POSTGRES_HOST=postgres \
|
|
||||||
-e POSTGRES_USER=postgres \
|
|
||||||
-e POSTGRES_PASSWORD=somesecret \
|
|
||||||
-e SYNAPSE_WORKER_TYPES=synchrotron,media_repository,user_dir \
|
|
||||||
-e SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1 \
|
|
||||||
matrixdotorg/synapse-workers
|
|
||||||
```
|
|
||||||
|
|
||||||
...substituting `POSTGRES*` variables for those that match a postgres host you have
|
|
||||||
available (usually a running postgres docker container).
|
|
||||||
|
|
||||||
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
|
|
||||||
use when running the container. All possible worker names are defined by the keys of the
|
|
||||||
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
|
|
||||||
Dockerfile makes use of to generate appropriate worker, nginx and supervisord config
|
|
||||||
files.
|
|
||||||
|
|
||||||
Sharding is supported for a subset of workers, in line with the
|
|
||||||
[worker documentation](../docs/workers.md). To run multiple instances of a given worker
|
|
||||||
type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
|
|
||||||
(e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
|
|
||||||
|
|
||||||
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
|
|
||||||
(leaving only the main process). The container is configured to use redis-based worker
|
|
||||||
mode.
|
|
||||||
|
|
||||||
Logs for workers and the main process are logged to stdout and can be viewed with
|
|
||||||
standard `docker logs` tooling. Worker logs contain their worker name
|
|
||||||
after the timestamp.
|
|
||||||
|
|
||||||
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
|
|
||||||
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
|
|
||||||
00, according to the container's clock. Logging for the main process must still be
|
|
||||||
configured by modifying the homeserver's log config in your Synapse data volume.
|
|
||||||
@@ -2,28 +2,26 @@
|
|||||||
|
|
||||||
This Docker image will run Synapse as a single process. By default it uses a
|
This Docker image will run Synapse as a single process. By default it uses a
|
||||||
sqlite database; for production use you should connect it to a separate
|
sqlite database; for production use you should connect it to a separate
|
||||||
postgres database. The image also does *not* provide a TURN server.
|
postgres database.
|
||||||
|
|
||||||
This image should work on all platforms that are supported by Docker upstream.
|
The image also does *not* provide a TURN server.
|
||||||
Note that Docker's WS1-backend Linux Containers on Windows
|
|
||||||
platform is [experimental](https://github.com/docker/for-win/issues/6470) and
|
|
||||||
is not supported by this image.
|
|
||||||
|
|
||||||
## Volumes
|
## Volumes
|
||||||
|
|
||||||
By default, the image expects a single volume, located at `/data`, that will hold:
|
By default, the image expects a single volume, located at ``/data``, that will hold:
|
||||||
|
|
||||||
* configuration files;
|
* configuration files;
|
||||||
|
* temporary files during uploads;
|
||||||
* uploaded media and thumbnails;
|
* uploaded media and thumbnails;
|
||||||
* the SQLite database if you do not configure postgres;
|
* the SQLite database if you do not configure postgres;
|
||||||
* the appservices configuration.
|
* the appservices configuration.
|
||||||
|
|
||||||
You are free to use separate volumes depending on storage endpoints at your
|
You are free to use separate volumes depending on storage endpoints at your
|
||||||
disposal. For instance, `/data/media` could be stored on a large but low
|
disposal. For instance, ``/data/media`` could be stored on a large but low
|
||||||
performance hdd storage while other files could be stored on high performance
|
performance hdd storage while other files could be stored on high performance
|
||||||
endpoints.
|
endpoints.
|
||||||
|
|
||||||
In order to setup an application service, simply create an `appservices`
|
In order to setup an application service, simply create an ``appservices``
|
||||||
directory in the data volume and write the application service Yaml
|
directory in the data volume and write the application service Yaml
|
||||||
configuration file there. Multiple application services are supported.
|
configuration file there. Multiple application services are supported.
|
||||||
|
|
||||||
@@ -45,7 +43,7 @@ docker run -it --rm \
|
|||||||
```
|
```
|
||||||
|
|
||||||
For information on picking a suitable server name, see
|
For information on picking a suitable server name, see
|
||||||
https://matrix-org.github.io/synapse/latest/setup/installation.html.
|
https://github.com/matrix-org/synapse/blob/master/INSTALL.md.
|
||||||
|
|
||||||
The above command will generate a `homeserver.yaml` in (typically)
|
The above command will generate a `homeserver.yaml` in (typically)
|
||||||
`/var/lib/docker/volumes/synapse-data/_data`. You should check this file, and
|
`/var/lib/docker/volumes/synapse-data/_data`. You should check this file, and
|
||||||
@@ -56,8 +54,6 @@ The following environment variables are supported in `generate` mode:
|
|||||||
* `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname.
|
* `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname.
|
||||||
* `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable
|
* `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable
|
||||||
anonymous statistics reporting.
|
anonymous statistics reporting.
|
||||||
* `SYNAPSE_HTTP_PORT`: the port Synapse should listen on for http traffic.
|
|
||||||
Defaults to `8008`.
|
|
||||||
* `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config
|
* `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config
|
||||||
and event signing key) will be stored. Defaults to `/data`.
|
and event signing key) will be stored. Defaults to `/data`.
|
||||||
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
|
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
|
||||||
@@ -78,8 +74,6 @@ docker run -d --name synapse \
|
|||||||
matrixdotorg/synapse:latest
|
matrixdotorg/synapse:latest
|
||||||
```
|
```
|
||||||
|
|
||||||
(assuming 8008 is the port Synapse is configured to listen on for http traffic.)
|
|
||||||
|
|
||||||
You can then check that it has started correctly with:
|
You can then check that it has started correctly with:
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -139,7 +133,7 @@ For documentation on using a reverse proxy, see
|
|||||||
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
|
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
|
||||||
|
|
||||||
For more information on enabling TLS support in synapse itself, see
|
For more information on enabling TLS support in synapse itself, see
|
||||||
https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates. Of
|
https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
|
||||||
course, you will need to expose the TLS port from the container with a `-p`
|
course, you will need to expose the TLS port from the container with a `-p`
|
||||||
argument to `docker run`.
|
argument to `docker run`.
|
||||||
|
|
||||||
@@ -191,16 +185,6 @@ whilst running the above `docker run` commands.
|
|||||||
```
|
```
|
||||||
--no-healthcheck
|
--no-healthcheck
|
||||||
```
|
```
|
||||||
|
|
||||||
## Disabling the healthcheck in docker-compose file
|
|
||||||
|
|
||||||
If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
|
|
||||||
|
|
||||||
```
|
|
||||||
healthcheck:
|
|
||||||
disable: true
|
|
||||||
```
|
|
||||||
|
|
||||||
## Setting custom healthcheck on docker run
|
## Setting custom healthcheck on docker run
|
||||||
|
|
||||||
If you wish to point the healthcheck at a different port with docker command, add the following
|
If you wish to point the healthcheck at a different port with docker command, add the following
|
||||||
@@ -212,18 +196,12 @@ If you wish to point the healthcheck at a different port with docker command, ad
|
|||||||
## Setting the healthcheck in docker-compose file
|
## Setting the healthcheck in docker-compose file
|
||||||
|
|
||||||
You can add the following to set a custom healthcheck in a docker compose file.
|
You can add the following to set a custom healthcheck in a docker compose file.
|
||||||
You will need docker-compose version >2.1 for this to work.
|
You will need version >2.1 for this to work.
|
||||||
|
|
||||||
```
|
```
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
|
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
|
||||||
interval: 15s
|
interval: 1m
|
||||||
timeout: 5s
|
timeout: 10s
|
||||||
retries: 3
|
retries: 3
|
||||||
start_period: 5s
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Using jemalloc
|
|
||||||
|
|
||||||
Jemalloc is embedded in the image and will be used instead of the default allocator.
|
|
||||||
You can read about jemalloc by reading the Synapse [README](../README.rst).
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
# The script to build the Debian package, as ran inside the Docker image.
|
# The script to build the Debian package, as ran inside the Docker image.
|
||||||
|
|
||||||
@@ -15,20 +15,6 @@ cd /synapse/build
|
|||||||
dch -M -l "+$DIST" "build for $DIST"
|
dch -M -l "+$DIST" "build for $DIST"
|
||||||
dch -M -r "" --force-distribution --distribution "$DIST"
|
dch -M -r "" --force-distribution --distribution "$DIST"
|
||||||
|
|
||||||
# if this is a prerelease, set the Section accordingly.
|
|
||||||
#
|
|
||||||
# When the package is later added to the package repo, reprepro will use the
|
|
||||||
# Section to determine which "component" it should go into (see
|
|
||||||
# https://manpages.debian.org/stretch/reprepro/reprepro.1.en.html#GUESSING)
|
|
||||||
|
|
||||||
DEB_VERSION=`dpkg-parsechangelog -SVersion`
|
|
||||||
case $DEB_VERSION in
|
|
||||||
*rc*|*a*|*b*|*c*)
|
|
||||||
sed -ie '/^Section:/c\Section: prerelease' debian/control
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
|
|
||||||
dpkg-buildpackage -us -uc
|
dpkg-buildpackage -us -uc
|
||||||
|
|
||||||
ls -l ..
|
ls -l ..
|
||||||
|
|||||||
@@ -1,27 +0,0 @@
|
|||||||
# This file contains the base config for the reverse proxy, as part of ../Dockerfile-workers.
|
|
||||||
# configure_workers_and_start.py uses and amends to this file depending on the workers
|
|
||||||
# that have been selected.
|
|
||||||
|
|
||||||
{{ upstream_directives }}
|
|
||||||
|
|
||||||
server {
|
|
||||||
# Listen on an unoccupied port number
|
|
||||||
listen 8008;
|
|
||||||
listen [::]:8008;
|
|
||||||
|
|
||||||
server_name localhost;
|
|
||||||
|
|
||||||
# Nginx by default only allows file uploads up to 1M in size
|
|
||||||
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
|
|
||||||
client_max_body_size 100M;
|
|
||||||
|
|
||||||
{{ worker_locations }}
|
|
||||||
|
|
||||||
# Send all other traffic to the main process
|
|
||||||
location ~* ^(\\/_matrix|\\/_synapse) {
|
|
||||||
proxy_pass http://localhost:8080;
|
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
# This file contains the base for the shared homeserver config file between Synapse workers,
|
|
||||||
# as part of ./Dockerfile-workers.
|
|
||||||
# configure_workers_and_start.py uses and amends to this file depending on the workers
|
|
||||||
# that have been selected.
|
|
||||||
|
|
||||||
redis:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
{{ shared_worker_config }}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# This file contains the base config for supervisord, as part of ../Dockerfile-workers.
|
|
||||||
# configure_workers_and_start.py uses and amends to this file depending on the workers
|
|
||||||
# that have been selected.
|
|
||||||
[supervisord]
|
|
||||||
nodaemon=true
|
|
||||||
user=root
|
|
||||||
|
|
||||||
[program:nginx]
|
|
||||||
command=/usr/sbin/nginx -g "daemon off;"
|
|
||||||
priority=500
|
|
||||||
stdout_logfile=/dev/stdout
|
|
||||||
stdout_logfile_maxbytes=0
|
|
||||||
stderr_logfile=/dev/stderr
|
|
||||||
stderr_logfile_maxbytes=0
|
|
||||||
username=www-data
|
|
||||||
autorestart=true
|
|
||||||
|
|
||||||
[program:redis]
|
|
||||||
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
|
|
||||||
priority=1
|
|
||||||
stdout_logfile=/dev/stdout
|
|
||||||
stdout_logfile_maxbytes=0
|
|
||||||
stderr_logfile=/dev/stderr
|
|
||||||
stderr_logfile_maxbytes=0
|
|
||||||
username=redis
|
|
||||||
autorestart=true
|
|
||||||
|
|
||||||
[program:synapse_main]
|
|
||||||
command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
|
|
||||||
priority=10
|
|
||||||
# Log startup failures to supervisord's stdout/err
|
|
||||||
# Regular synapse logs will still go in the configured data directory
|
|
||||||
stdout_logfile=/dev/stdout
|
|
||||||
stdout_logfile_maxbytes=0
|
|
||||||
stderr_logfile=/dev/stderr
|
|
||||||
stderr_logfile_maxbytes=0
|
|
||||||
autorestart=unexpected
|
|
||||||
exitcodes=0
|
|
||||||
|
|
||||||
# Additional process blocks
|
|
||||||
{{ worker_config }}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
# This is a configuration template for a single worker instance, and is
|
|
||||||
# used by Dockerfile-workers.
|
|
||||||
# Values will be change depending on whichever workers are selected when
|
|
||||||
# running that image.
|
|
||||||
|
|
||||||
worker_app: "{{ app }}"
|
|
||||||
worker_name: "{{ name }}"
|
|
||||||
|
|
||||||
# The replication listener on the main synapse process.
|
|
||||||
worker_replication_host: 127.0.0.1
|
|
||||||
worker_replication_http_port: 9093
|
|
||||||
|
|
||||||
worker_listeners:
|
|
||||||
- type: http
|
|
||||||
port: {{ port }}
|
|
||||||
{% if listener_resources %}
|
|
||||||
resources:
|
|
||||||
- names:
|
|
||||||
{%- for resource in listener_resources %}
|
|
||||||
- {{ resource }}
|
|
||||||
{%- endfor %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
worker_log_config: {{ worker_log_config_filepath }}
|
|
||||||
|
|
||||||
{{ worker_extra_conf }}
|
|
||||||
@@ -7,6 +7,12 @@
|
|||||||
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
|
tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
|
||||||
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
|
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
|
||||||
|
|
||||||
|
{% if SYNAPSE_ACME %}
|
||||||
|
acme:
|
||||||
|
enabled: true
|
||||||
|
port: 8009
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
## Server ##
|
## Server ##
|
||||||
@@ -34,9 +40,7 @@ listeners:
|
|||||||
compress: false
|
compress: false
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Allow configuring in case we want to reverse proxy 8008
|
- port: 8008
|
||||||
# using another process in the same container
|
|
||||||
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
|
|
||||||
tls: false
|
tls: false
|
||||||
bind_addresses: ['::']
|
bind_addresses: ['::']
|
||||||
type: http
|
type: http
|
||||||
@@ -85,6 +89,7 @@ federation_rc_concurrent: 3
|
|||||||
## Files ##
|
## Files ##
|
||||||
|
|
||||||
media_store_path: "/data/media"
|
media_store_path: "/data/media"
|
||||||
|
uploads_path: "/data/uploads"
|
||||||
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
|
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
|
||||||
max_image_pixels: "32M"
|
max_image_pixels: "32M"
|
||||||
dynamic_thumbnails: false
|
dynamic_thumbnails: false
|
||||||
@@ -169,10 +174,18 @@ report_stats: False
|
|||||||
|
|
||||||
## API Configuration ##
|
## API Configuration ##
|
||||||
|
|
||||||
|
room_invite_state_types:
|
||||||
|
- "m.room.join_rules"
|
||||||
|
- "m.room.canonical_alias"
|
||||||
|
- "m.room.avatar"
|
||||||
|
- "m.room.name"
|
||||||
|
|
||||||
{% if SYNAPSE_APPSERVICES %}
|
{% if SYNAPSE_APPSERVICES %}
|
||||||
app_service_config_files:
|
app_service_config_files:
|
||||||
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
|
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
app_service_config_files: []
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
|
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
|
||||||
|
|||||||
@@ -2,36 +2,9 @@ version: 1
|
|||||||
|
|
||||||
formatters:
|
formatters:
|
||||||
precise:
|
precise:
|
||||||
{% if worker_name %}
|
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||||
format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
|
||||||
{% else %}
|
|
||||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
{% if LOG_FILE_PATH %}
|
|
||||||
file:
|
|
||||||
class: logging.handlers.TimedRotatingFileHandler
|
|
||||||
formatter: precise
|
|
||||||
filename: {{ LOG_FILE_PATH }}
|
|
||||||
when: "midnight"
|
|
||||||
backupCount: 6 # Does not include the current log file.
|
|
||||||
encoding: utf8
|
|
||||||
|
|
||||||
# Default to buffering writes to log file for efficiency. This means that
|
|
||||||
# there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
|
|
||||||
# logs will still be flushed immediately.
|
|
||||||
buffer:
|
|
||||||
class: logging.handlers.MemoryHandler
|
|
||||||
target: file
|
|
||||||
# The capacity is the number of log lines that are buffered before
|
|
||||||
# being written to disk. Increasing this will lead to better
|
|
||||||
# performance, at the expensive of it taking longer for log lines to
|
|
||||||
# be written to disk.
|
|
||||||
capacity: 10
|
|
||||||
flushLevel: 30 # Flush for WARNING logs as well
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
console:
|
console:
|
||||||
class: logging.StreamHandler
|
class: logging.StreamHandler
|
||||||
formatter: precise
|
formatter: precise
|
||||||
@@ -44,11 +17,6 @@ loggers:
|
|||||||
|
|
||||||
root:
|
root:
|
||||||
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
|
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
|
||||||
|
|
||||||
{% if LOG_FILE_PATH %}
|
|
||||||
handlers: [console, buffer]
|
|
||||||
{% else %}
|
|
||||||
handlers: [console]
|
handlers: [console]
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
disable_existing_loggers: false
|
disable_existing_loggers: false
|
||||||
|
|||||||
@@ -1,558 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# This script reads environment variables and generates a shared Synapse worker,
|
|
||||||
# nginx and supervisord configs depending on the workers requested.
|
|
||||||
#
|
|
||||||
# The environment variables it reads are:
|
|
||||||
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
|
|
||||||
# * SYNAPSE_REPORT_STATS: Whether to report stats.
|
|
||||||
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
|
|
||||||
# below. Leave empty for no workers, or set to '*' for all possible workers.
|
|
||||||
#
|
|
||||||
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
|
|
||||||
# in the project's README), this script may be run multiple times, and functionality should
|
|
||||||
# continue to work if so.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import jinja2
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
|
|
||||||
|
|
||||||
|
|
||||||
WORKERS_CONFIG = {
|
|
||||||
"pusher": {
|
|
||||||
"app": "synapse.app.pusher",
|
|
||||||
"listener_resources": [],
|
|
||||||
"endpoint_patterns": [],
|
|
||||||
"shared_extra_conf": {"start_pushers": False},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"user_dir": {
|
|
||||||
"app": "synapse.app.user_dir",
|
|
||||||
"listener_resources": ["client"],
|
|
||||||
"endpoint_patterns": [
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
|
|
||||||
],
|
|
||||||
"shared_extra_conf": {"update_user_directory": False},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"media_repository": {
|
|
||||||
"app": "synapse.app.media_repository",
|
|
||||||
"listener_resources": ["media"],
|
|
||||||
"endpoint_patterns": [
|
|
||||||
"^/_matrix/media/",
|
|
||||||
"^/_synapse/admin/v1/purge_media_cache$",
|
|
||||||
"^/_synapse/admin/v1/room/.*/media.*$",
|
|
||||||
"^/_synapse/admin/v1/user/.*/media.*$",
|
|
||||||
"^/_synapse/admin/v1/media/.*$",
|
|
||||||
"^/_synapse/admin/v1/quarantine_media/.*$",
|
|
||||||
],
|
|
||||||
"shared_extra_conf": {"enable_media_repo": False},
|
|
||||||
"worker_extra_conf": "enable_media_repo: true",
|
|
||||||
},
|
|
||||||
"appservice": {
|
|
||||||
"app": "synapse.app.appservice",
|
|
||||||
"listener_resources": [],
|
|
||||||
"endpoint_patterns": [],
|
|
||||||
"shared_extra_conf": {"notify_appservices": False},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"federation_sender": {
|
|
||||||
"app": "synapse.app.federation_sender",
|
|
||||||
"listener_resources": [],
|
|
||||||
"endpoint_patterns": [],
|
|
||||||
"shared_extra_conf": {"send_federation": False},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"synchrotron": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": ["client"],
|
|
||||||
"endpoint_patterns": [
|
|
||||||
"^/_matrix/client/(v2_alpha|r0)/sync$",
|
|
||||||
"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
|
|
||||||
"^/_matrix/client/(api/v1|r0)/initialSync$",
|
|
||||||
"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
|
|
||||||
],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"federation_reader": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": ["federation"],
|
|
||||||
"endpoint_patterns": [
|
|
||||||
"^/_matrix/federation/(v1|v2)/event/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/state/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/state_ids/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/backfill/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/get_missing_events/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/publicRooms",
|
|
||||||
"^/_matrix/federation/(v1|v2)/query/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/make_join/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/make_leave/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/send_join/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/send_leave/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/invite/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/query_auth/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/event_auth/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/user/devices/",
|
|
||||||
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
|
|
||||||
"^/_matrix/key/v2/query",
|
|
||||||
],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"federation_inbound": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": ["federation"],
|
|
||||||
"endpoint_patterns": ["/_matrix/federation/(v1|v2)/send/"],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"event_persister": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": ["replication"],
|
|
||||||
"endpoint_patterns": [],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"background_worker": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": [],
|
|
||||||
"endpoint_patterns": [],
|
|
||||||
# This worker cannot be sharded. Therefore there should only ever be one background
|
|
||||||
# worker, and it should be named background_worker1
|
|
||||||
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"event_creator": {
|
|
||||||
"app": "synapse.app.generic_worker",
|
|
||||||
"listener_resources": ["client"],
|
|
||||||
"endpoint_patterns": [
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/join/",
|
|
||||||
"^/_matrix/client/(api/v1|r0|unstable)/profile/",
|
|
||||||
],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": "",
|
|
||||||
},
|
|
||||||
"frontend_proxy": {
|
|
||||||
"app": "synapse.app.frontend_proxy",
|
|
||||||
"listener_resources": ["client", "replication"],
|
|
||||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
|
|
||||||
"shared_extra_conf": {},
|
|
||||||
"worker_extra_conf": (
|
|
||||||
"worker_main_http_uri: http://127.0.0.1:%d"
|
|
||||||
% (MAIN_PROCESS_HTTP_LISTENER_PORT,),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
# Templates for sections that may be inserted multiple times in config files
|
|
||||||
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
|
|
||||||
[program:synapse_{name}]
|
|
||||||
command=/usr/local/bin/python -m {app} \
|
|
||||||
--config-path="{config_path}" \
|
|
||||||
--config-path=/conf/workers/shared.yaml \
|
|
||||||
--config-path=/conf/workers/{name}.yaml
|
|
||||||
autorestart=unexpected
|
|
||||||
priority=500
|
|
||||||
exitcodes=0
|
|
||||||
stdout_logfile=/dev/stdout
|
|
||||||
stdout_logfile_maxbytes=0
|
|
||||||
stderr_logfile=/dev/stderr
|
|
||||||
stderr_logfile_maxbytes=0
|
|
||||||
"""
|
|
||||||
|
|
||||||
NGINX_LOCATION_CONFIG_BLOCK = """
|
|
||||||
location ~* {endpoint} {{
|
|
||||||
proxy_pass {upstream};
|
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
}}
|
|
||||||
"""
|
|
||||||
|
|
||||||
NGINX_UPSTREAM_CONFIG_BLOCK = """
|
|
||||||
upstream {upstream_worker_type} {{
|
|
||||||
{body}
|
|
||||||
}}
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
# Utility functions
|
|
||||||
def log(txt: str):
|
|
||||||
"""Log something to the stdout.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
txt: The text to log.
|
|
||||||
"""
|
|
||||||
print(txt)
|
|
||||||
|
|
||||||
|
|
||||||
def error(txt: str):
|
|
||||||
"""Log something and exit with an error code.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
txt: The text to log in error.
|
|
||||||
"""
|
|
||||||
log(txt)
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
|
|
||||||
def convert(src: str, dst: str, **template_vars):
|
|
||||||
"""Generate a file from a template
|
|
||||||
|
|
||||||
Args:
|
|
||||||
src: Path to the input file.
|
|
||||||
dst: Path to write to.
|
|
||||||
template_vars: The arguments to replace placeholder variables in the template with.
|
|
||||||
"""
|
|
||||||
# Read the template file
|
|
||||||
with open(src) as infile:
|
|
||||||
template = infile.read()
|
|
||||||
|
|
||||||
# Generate a string from the template. We disable autoescape to prevent template
|
|
||||||
# variables from being escaped.
|
|
||||||
rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
|
|
||||||
|
|
||||||
# Write the generated contents to a file
|
|
||||||
#
|
|
||||||
# We use append mode in case the files have already been written to by something else
|
|
||||||
# (for instance, as part of the instructions in a dockerfile).
|
|
||||||
with open(dst, "a") as outfile:
|
|
||||||
# In case the existing file doesn't end with a newline
|
|
||||||
outfile.write("\n")
|
|
||||||
|
|
||||||
outfile.write(rendered)
|
|
||||||
|
|
||||||
|
|
||||||
def add_sharding_to_shared_config(
|
|
||||||
shared_config: dict,
|
|
||||||
worker_type: str,
|
|
||||||
worker_name: str,
|
|
||||||
worker_port: int,
|
|
||||||
) -> None:
|
|
||||||
"""Given a dictionary representing a config file shared across all workers,
|
|
||||||
append sharded worker information to it for the current worker_type instance.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
shared_config: The config dict that all worker instances share (after being converted to YAML)
|
|
||||||
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
|
|
||||||
worker_name: The name of the worker instance.
|
|
||||||
worker_port: The HTTP replication port that the worker instance is listening on.
|
|
||||||
"""
|
|
||||||
# The instance_map config field marks the workers that write to various replication streams
|
|
||||||
instance_map = shared_config.setdefault("instance_map", {})
|
|
||||||
|
|
||||||
# Worker-type specific sharding config
|
|
||||||
if worker_type == "pusher":
|
|
||||||
shared_config.setdefault("pusher_instances", []).append(worker_name)
|
|
||||||
|
|
||||||
elif worker_type == "federation_sender":
|
|
||||||
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
|
|
||||||
|
|
||||||
elif worker_type == "event_persister":
|
|
||||||
# Event persisters write to the events stream, so we need to update
|
|
||||||
# the list of event stream writers
|
|
||||||
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
|
|
||||||
worker_name
|
|
||||||
)
|
|
||||||
|
|
||||||
# Map of stream writer instance names to host/ports combos
|
|
||||||
instance_map[worker_name] = {
|
|
||||||
"host": "localhost",
|
|
||||||
"port": worker_port,
|
|
||||||
}
|
|
||||||
|
|
||||||
elif worker_type == "media_repository":
|
|
||||||
# The first configured media worker will run the media background jobs
|
|
||||||
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
|
|
||||||
|
|
||||||
|
|
||||||
def generate_base_homeserver_config():
|
|
||||||
"""Starts Synapse and generates a basic homeserver config, which will later be
|
|
||||||
modified for worker support.
|
|
||||||
|
|
||||||
Raises: CalledProcessError if calling start.py returned a non-zero exit code.
|
|
||||||
"""
|
|
||||||
# start.py already does this for us, so just call that.
|
|
||||||
# note that this script is copied in in the official, monolith dockerfile
|
|
||||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
|
||||||
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
|
|
||||||
|
|
||||||
|
|
||||||
def generate_worker_files(environ, config_path: str, data_dir: str):
|
|
||||||
"""Read the desired list of workers from environment variables and generate
|
|
||||||
shared homeserver, nginx and supervisord configs.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
environ: _Environ[str]
|
|
||||||
config_path: Where to output the generated Synapse main worker config file.
|
|
||||||
data_dir: The location of the synapse data directory. Where log and
|
|
||||||
user-facing config files live.
|
|
||||||
"""
|
|
||||||
# Note that yaml cares about indentation, so care should be taken to insert lines
|
|
||||||
# into files at the correct indentation below.
|
|
||||||
|
|
||||||
# shared_config is the contents of a Synapse config file that will be shared amongst
|
|
||||||
# the main Synapse process as well as all workers.
|
|
||||||
# It is intended mainly for disabling functionality when certain workers are spun up,
|
|
||||||
# and adding a replication listener.
|
|
||||||
|
|
||||||
# First read the original config file and extract the listeners block. Then we'll add
|
|
||||||
# another listener for replication. Later we'll write out the result.
|
|
||||||
listeners = [
|
|
||||||
{
|
|
||||||
"port": 9093,
|
|
||||||
"bind_address": "127.0.0.1",
|
|
||||||
"type": "http",
|
|
||||||
"resources": [{"names": ["replication"]}],
|
|
||||||
}
|
|
||||||
]
|
|
||||||
with open(config_path) as file_stream:
|
|
||||||
original_config = yaml.safe_load(file_stream)
|
|
||||||
original_listeners = original_config.get("listeners")
|
|
||||||
if original_listeners:
|
|
||||||
listeners += original_listeners
|
|
||||||
|
|
||||||
# The shared homeserver config. The contents of which will be inserted into the
|
|
||||||
# base shared worker jinja2 template.
|
|
||||||
#
|
|
||||||
# This config file will be passed to all workers, included Synapse's main process.
|
|
||||||
shared_config = {"listeners": listeners}
|
|
||||||
|
|
||||||
# The supervisord config. The contents of which will be inserted into the
|
|
||||||
# base supervisord jinja2 template.
|
|
||||||
#
|
|
||||||
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
|
|
||||||
# and all of its worker processes. Load the config template, which defines a few
|
|
||||||
# services that are necessary to run.
|
|
||||||
supervisord_config = ""
|
|
||||||
|
|
||||||
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
|
|
||||||
# ports of each worker. For example:
|
|
||||||
# {
|
|
||||||
# worker_type: {1234, 1235, ...}}
|
|
||||||
# }
|
|
||||||
# and will be used to construct 'upstream' nginx directives.
|
|
||||||
nginx_upstreams = {}
|
|
||||||
|
|
||||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
|
|
||||||
# placed after the proxy_pass directive. The main benefit to representing this data as a
|
|
||||||
# dict over a str is that we can easily deduplicate endpoints across multiple instances
|
|
||||||
# of the same worker.
|
|
||||||
#
|
|
||||||
# An nginx site config that will be amended to depending on the workers that are
|
|
||||||
# spun up. To be placed in /etc/nginx/conf.d.
|
|
||||||
nginx_locations = {}
|
|
||||||
|
|
||||||
# Read the desired worker configuration from the environment
|
|
||||||
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
|
|
||||||
if worker_types is None:
|
|
||||||
# No workers, just the main process
|
|
||||||
worker_types = []
|
|
||||||
else:
|
|
||||||
# Split type names by comma
|
|
||||||
worker_types = worker_types.split(",")
|
|
||||||
|
|
||||||
# Create the worker configuration directory if it doesn't already exist
|
|
||||||
os.makedirs("/conf/workers", exist_ok=True)
|
|
||||||
|
|
||||||
# Start worker ports from this arbitrary port
|
|
||||||
worker_port = 18009
|
|
||||||
|
|
||||||
# A counter of worker_type -> int. Used for determining the name for a given
|
|
||||||
# worker type when generating its config file, as each worker's name is just
|
|
||||||
# worker_type + instance #
|
|
||||||
worker_type_counter = {}
|
|
||||||
|
|
||||||
# For each worker type specified by the user, create config values
|
|
||||||
for worker_type in worker_types:
|
|
||||||
worker_type = worker_type.strip()
|
|
||||||
|
|
||||||
worker_config = WORKERS_CONFIG.get(worker_type)
|
|
||||||
if worker_config:
|
|
||||||
worker_config = worker_config.copy()
|
|
||||||
else:
|
|
||||||
log(worker_type + " is an unknown worker type! It will be ignored")
|
|
||||||
continue
|
|
||||||
|
|
||||||
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
|
|
||||||
worker_type_counter[worker_type] = new_worker_count
|
|
||||||
|
|
||||||
# Name workers by their type concatenated with an incrementing number
|
|
||||||
# e.g. federation_reader1
|
|
||||||
worker_name = worker_type + str(new_worker_count)
|
|
||||||
worker_config.update(
|
|
||||||
{"name": worker_name, "port": worker_port, "config_path": config_path}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Update the shared config with any worker-type specific options
|
|
||||||
shared_config.update(worker_config["shared_extra_conf"])
|
|
||||||
|
|
||||||
# Check if more than one instance of this worker type has been specified
|
|
||||||
worker_type_total_count = worker_types.count(worker_type)
|
|
||||||
if worker_type_total_count > 1:
|
|
||||||
# Update the shared config with sharding-related options if necessary
|
|
||||||
add_sharding_to_shared_config(
|
|
||||||
shared_config, worker_type, worker_name, worker_port
|
|
||||||
)
|
|
||||||
|
|
||||||
# Enable the worker in supervisord
|
|
||||||
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
|
|
||||||
|
|
||||||
# Add nginx location blocks for this worker's endpoints (if any are defined)
|
|
||||||
for pattern in worker_config["endpoint_patterns"]:
|
|
||||||
# Determine whether we need to load-balance this worker
|
|
||||||
if worker_type_total_count > 1:
|
|
||||||
# Create or add to a load-balanced upstream for this worker
|
|
||||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
|
||||||
|
|
||||||
# Upstreams are named after the worker_type
|
|
||||||
upstream = "http://" + worker_type
|
|
||||||
else:
|
|
||||||
upstream = "http://localhost:%d" % (worker_port,)
|
|
||||||
|
|
||||||
# Note that this endpoint should proxy to this upstream
|
|
||||||
nginx_locations[pattern] = upstream
|
|
||||||
|
|
||||||
# Write out the worker's logging config file
|
|
||||||
|
|
||||||
# Check whether we should write worker logs to disk, in addition to the console
|
|
||||||
extra_log_template_args = {}
|
|
||||||
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
|
|
||||||
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
|
|
||||||
dir=data_dir, name=worker_name
|
|
||||||
)
|
|
||||||
|
|
||||||
# Render and write the file
|
|
||||||
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
|
|
||||||
convert(
|
|
||||||
"/conf/log.config",
|
|
||||||
log_config_filepath,
|
|
||||||
worker_name=worker_name,
|
|
||||||
**extra_log_template_args,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Then a worker config file
|
|
||||||
convert(
|
|
||||||
"/conf/worker.yaml.j2",
|
|
||||||
"/conf/workers/{name}.yaml".format(name=worker_name),
|
|
||||||
**worker_config,
|
|
||||||
worker_log_config_filepath=log_config_filepath,
|
|
||||||
)
|
|
||||||
|
|
||||||
worker_port += 1
|
|
||||||
|
|
||||||
# Build the nginx location config blocks
|
|
||||||
nginx_location_config = ""
|
|
||||||
for endpoint, upstream in nginx_locations.items():
|
|
||||||
nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
|
|
||||||
endpoint=endpoint,
|
|
||||||
upstream=upstream,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Determine the load-balancing upstreams to configure
|
|
||||||
nginx_upstream_config = ""
|
|
||||||
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
|
|
||||||
body = ""
|
|
||||||
for port in upstream_worker_ports:
|
|
||||||
body += " server localhost:%d;\n" % (port,)
|
|
||||||
|
|
||||||
# Add to the list of configured upstreams
|
|
||||||
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
|
|
||||||
upstream_worker_type=upstream_worker_type,
|
|
||||||
body=body,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Finally, we'll write out the config files.
|
|
||||||
|
|
||||||
# Shared homeserver config
|
|
||||||
convert(
|
|
||||||
"/conf/shared.yaml.j2",
|
|
||||||
"/conf/workers/shared.yaml",
|
|
||||||
shared_worker_config=yaml.dump(shared_config),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Nginx config
|
|
||||||
convert(
|
|
||||||
"/conf/nginx.conf.j2",
|
|
||||||
"/etc/nginx/conf.d/matrix-synapse.conf",
|
|
||||||
worker_locations=nginx_location_config,
|
|
||||||
upstream_directives=nginx_upstream_config,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Supervisord config
|
|
||||||
convert(
|
|
||||||
"/conf/supervisord.conf.j2",
|
|
||||||
"/etc/supervisor/conf.d/supervisord.conf",
|
|
||||||
main_config_path=config_path,
|
|
||||||
worker_config=supervisord_config,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Ensure the logging directory exists
|
|
||||||
log_dir = data_dir + "/logs"
|
|
||||||
if not os.path.exists(log_dir):
|
|
||||||
os.mkdir(log_dir)
|
|
||||||
|
|
||||||
|
|
||||||
def start_supervisord():
|
|
||||||
"""Starts up supervisord which then starts and monitors all other necessary processes
|
|
||||||
|
|
||||||
Raises: CalledProcessError if calling start.py return a non-zero exit code.
|
|
||||||
"""
|
|
||||||
subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
|
|
||||||
|
|
||||||
|
|
||||||
def main(args, environ):
|
|
||||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
|
||||||
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
|
|
||||||
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
|
|
||||||
|
|
||||||
# override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
|
|
||||||
# this needs to be handled by a frontend proxy
|
|
||||||
environ["SYNAPSE_NO_TLS"] = "yes"
|
|
||||||
|
|
||||||
# Generate the base homeserver config if one does not yet exist
|
|
||||||
if not os.path.exists(config_path):
|
|
||||||
log("Generating base homeserver config")
|
|
||||||
generate_base_homeserver_config()
|
|
||||||
|
|
||||||
# This script may be run multiple times (mostly by Complement, see note at top of file).
|
|
||||||
# Don't re-configure workers in this instance.
|
|
||||||
mark_filepath = "/conf/workers_have_been_configured"
|
|
||||||
if not os.path.exists(mark_filepath):
|
|
||||||
# Always regenerate all other config files
|
|
||||||
generate_worker_files(environ, config_path, data_dir)
|
|
||||||
|
|
||||||
# Mark workers as being configured
|
|
||||||
with open(mark_filepath, "w") as f:
|
|
||||||
f.write("")
|
|
||||||
|
|
||||||
# Start supervisord, which will start Synapse, all of the configured worker
|
|
||||||
# processes, redis, nginx etc. according to the config we created above.
|
|
||||||
start_supervisord()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main(sys.argv, os.environ)
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
# This script runs the PostgreSQL tests inside a Docker container. It expects
|
# This script runs the PostgreSQL tests inside a Docker container. It expects
|
||||||
# the relevant source files to be mounted into /src (done automatically by the
|
# the relevant source files to be mounted into /src (done automatically by the
|
||||||
|
|||||||
@@ -3,7 +3,6 @@
|
|||||||
import codecs
|
import codecs
|
||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
import platform
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -214,13 +213,6 @@ def main(args, environ):
|
|||||||
if "-m" not in args:
|
if "-m" not in args:
|
||||||
args = ["-m", synapse_worker] + args
|
args = ["-m", synapse_worker] + args
|
||||||
|
|
||||||
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
|
|
||||||
|
|
||||||
if os.path.isfile(jemallocpath):
|
|
||||||
environ["LD_PRELOAD"] = jemallocpath
|
|
||||||
else:
|
|
||||||
log("Could not find %s, will not use" % (jemallocpath,))
|
|
||||||
|
|
||||||
# if there are no config files passed to synapse, try adding the default file
|
# if there are no config files passed to synapse, try adding the default file
|
||||||
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
|
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
|
||||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
||||||
@@ -256,9 +248,9 @@ running with 'migrate_config'. See the README for more details.
|
|||||||
args = ["python"] + args
|
args = ["python"] + args
|
||||||
if ownership is not None:
|
if ownership is not None:
|
||||||
args = ["gosu", ownership] + args
|
args = ["gosu", ownership] + args
|
||||||
os.execve("/usr/sbin/gosu", args, environ)
|
os.execv("/usr/sbin/gosu", args)
|
||||||
else:
|
else:
|
||||||
os.execve("/usr/local/bin/python", args, environ)
|
os.execv("/usr/local/bin/python", args)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -8,8 +8,7 @@
|
|||||||
#
|
#
|
||||||
# It is *not* intended to be copied and used as the basis for a real
|
# It is *not* intended to be copied and used as the basis for a real
|
||||||
# homeserver.yaml. Instead, if you are starting from scratch, please generate
|
# homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||||
# a fresh config using Synapse by following the instructions in
|
# a fresh config using Synapse by following the instructions in INSTALL.md.
|
||||||
# https://matrix-org.github.io/synapse/latest/setup/installation.html.
|
|
||||||
|
|
||||||
# Configuration options that take a time period can be set using a number
|
# Configuration options that take a time period can be set using a number
|
||||||
# followed by a letter. Letters have the following meanings:
|
# followed by a letter. Letters have the following meanings:
|
||||||
|
|||||||
161
docs/ACME.md
Normal file
161
docs/ACME.md
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
# ACME
|
||||||
|
|
||||||
|
From version 1.0 (June 2019) onwards, Synapse requires valid TLS
|
||||||
|
certificates for communication between servers (by default on port
|
||||||
|
`8448`) in addition to those that are client-facing (port `443`). To
|
||||||
|
help homeserver admins fulfil this new requirement, Synapse v0.99.0
|
||||||
|
introduced support for automatically provisioning certificates through
|
||||||
|
[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol.
|
||||||
|
|
||||||
|
## Deprecation of ACME v1
|
||||||
|
|
||||||
|
In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
|
||||||
|
Let's Encrypt announced that they were deprecating version 1 of the ACME
|
||||||
|
protocol, with the plan to disable the use of it for new accounts in
|
||||||
|
November 2019, for new domains in June 2020, and for existing accounts and
|
||||||
|
domains in June 2021.
|
||||||
|
|
||||||
|
Synapse doesn't currently support version 2 of the ACME protocol, which
|
||||||
|
means that:
|
||||||
|
|
||||||
|
* for existing installs, Synapse's built-in ACME support will continue
|
||||||
|
to work until June 2021.
|
||||||
|
* for new installs, this feature will not work at all.
|
||||||
|
|
||||||
|
Either way, it is recommended to move from Synapse's ACME support
|
||||||
|
feature to an external automated tool such as [certbot](https://github.com/certbot/certbot)
|
||||||
|
(or browse [this list](https://letsencrypt.org/fr/docs/client-options/)
|
||||||
|
for an alternative ACME client).
|
||||||
|
|
||||||
|
It's also recommended to use a reverse proxy for the server-facing
|
||||||
|
communications (more documentation about this can be found
|
||||||
|
[here](/docs/reverse_proxy.md)) as well as the client-facing ones and
|
||||||
|
have it serve the certificates.
|
||||||
|
|
||||||
|
In case you can't do that and need Synapse to serve them itself, make
|
||||||
|
sure to set the `tls_certificate_path` configuration setting to the path
|
||||||
|
of the certificate (make sure to use the certificate containing the full
|
||||||
|
certification chain, e.g. `fullchain.pem` if using certbot) and
|
||||||
|
`tls_private_key_path` to the path of the matching private key. Note
|
||||||
|
that in this case you will need to restart Synapse after each
|
||||||
|
certificate renewal so that Synapse stops using the old certificate.
|
||||||
|
|
||||||
|
If you still want to use Synapse's built-in ACME support, the rest of
|
||||||
|
this document explains how to set it up.
|
||||||
|
|
||||||
|
## Initial setup
|
||||||
|
|
||||||
|
In the case that your `server_name` config variable is the same as
|
||||||
|
the hostname that the client connects to, then the same certificate can be
|
||||||
|
used between client and federation ports without issue.
|
||||||
|
|
||||||
|
If your configuration file does not already have an `acme` section, you can
|
||||||
|
generate an example config by running the `generate_config` executable. For
|
||||||
|
example:
|
||||||
|
|
||||||
|
```
|
||||||
|
~/synapse/env3/bin/generate_config
|
||||||
|
```
|
||||||
|
|
||||||
|
You will need to provide Let's Encrypt (or another ACME provider) access to
|
||||||
|
your Synapse ACME challenge responder on port 80, at the domain of your
|
||||||
|
homeserver. This requires you to either change the port of the ACME listener
|
||||||
|
provided by Synapse to a high port and reverse proxy to it, or use a tool
|
||||||
|
like `authbind` to allow Synapse to listen on port 80 without root access.
|
||||||
|
(Do not run Synapse with root permissions!) Detailed instructions are
|
||||||
|
available under "ACME setup" below.
|
||||||
|
|
||||||
|
If you already have certificates, you will need to back up or delete them
|
||||||
|
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
|
||||||
|
directory), Synapse's ACME implementation will not overwrite them.
|
||||||
|
|
||||||
|
## ACME setup
|
||||||
|
|
||||||
|
The main steps for enabling ACME support in short summary are:
|
||||||
|
|
||||||
|
1. Allow Synapse to listen for incoming ACME challenges.
|
||||||
|
1. Enable ACME support in `homeserver.yaml`.
|
||||||
|
1. Move your old certificates (files `example.com.tls.crt` and `example.com.tls.key` out of the way if they currently exist at the paths specified in `homeserver.yaml`.
|
||||||
|
1. Restart Synapse.
|
||||||
|
|
||||||
|
Detailed instructions for each step are provided below.
|
||||||
|
|
||||||
|
### Listening on port 80
|
||||||
|
|
||||||
|
In order for Synapse to complete the ACME challenge to provision a
|
||||||
|
certificate, it needs access to port 80. Typically listening on port 80 is
|
||||||
|
only granted to applications running as root. There are thus two solutions to
|
||||||
|
this problem.
|
||||||
|
|
||||||
|
#### Using a reverse proxy
|
||||||
|
|
||||||
|
A reverse proxy such as Apache or nginx allows a single process (the web
|
||||||
|
server) to listen on port 80 and proxy traffic to the appropriate program
|
||||||
|
running on your server. It is the recommended method for setting up ACME as
|
||||||
|
it allows you to use your existing webserver while also allowing Synapse to
|
||||||
|
provision certificates as needed.
|
||||||
|
|
||||||
|
For nginx users, add the following line to your existing `server` block:
|
||||||
|
|
||||||
|
```
|
||||||
|
location /.well-known/acme-challenge {
|
||||||
|
proxy_pass http://localhost:8009;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For Apache, add the following to your existing webserver config:
|
||||||
|
|
||||||
|
```
|
||||||
|
ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-challenge
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to restart/reload your webserver after making changes.
|
||||||
|
|
||||||
|
Now make the relevant changes in `homeserver.yaml` to enable ACME support:
|
||||||
|
|
||||||
|
```
|
||||||
|
acme:
|
||||||
|
enabled: true
|
||||||
|
port: 8009
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Authbind
|
||||||
|
|
||||||
|
`authbind` allows a program which does not run as root to bind to
|
||||||
|
low-numbered ports in a controlled way. The setup is simpler, but requires a
|
||||||
|
webserver not to already be running on port 80. **This includes every time
|
||||||
|
Synapse renews a certificate**, which may be cumbersome if you usually run a
|
||||||
|
web server on port 80. Nevertheless, if you're sure port 80 is not being used
|
||||||
|
for any other purpose then all that is necessary is the following:
|
||||||
|
|
||||||
|
Install `authbind`. For example, on Debian/Ubuntu:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo apt-get install authbind
|
||||||
|
```
|
||||||
|
|
||||||
|
Allow `authbind` to bind port 80:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo touch /etc/authbind/byport/80
|
||||||
|
sudo chmod 777 /etc/authbind/byport/80
|
||||||
|
```
|
||||||
|
|
||||||
|
When Synapse is started, use the following syntax:
|
||||||
|
|
||||||
|
```
|
||||||
|
authbind --deep <synapse start command>
|
||||||
|
```
|
||||||
|
|
||||||
|
Make the relevant changes in `homeserver.yaml` to enable ACME support:
|
||||||
|
|
||||||
|
```
|
||||||
|
acme:
|
||||||
|
enabled: true
|
||||||
|
```
|
||||||
|
|
||||||
|
### (Re)starting synapse
|
||||||
|
|
||||||
|
Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.
|
||||||
|
|
||||||
|
Finally, start/restart Synapse.
|
||||||
@@ -1,37 +1,31 @@
|
|||||||
# Overview
|
# Overview
|
||||||
A captcha can be enabled on your homeserver to help prevent bots from registering
|
Captcha can be enabled for this home server. This file explains how to do that.
|
||||||
accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys
|
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
|
||||||
from Google.
|
|
||||||
|
|
||||||
## Getting API keys
|
## Getting keys
|
||||||
|
|
||||||
|
Requires a site/secret key pair from:
|
||||||
|
|
||||||
|
<https://developers.google.com/recaptcha/>
|
||||||
|
|
||||||
|
Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
|
||||||
|
|
||||||
|
## Setting ReCaptcha Keys
|
||||||
|
|
||||||
|
The keys are a config option on the home server config. If they are not
|
||||||
|
visible, you can generate them via `--generate-config`. Set the following value:
|
||||||
|
|
||||||
1. Create a new site at <https://www.google.com/recaptcha/admin/create>
|
|
||||||
1. Set the label to anything you want
|
|
||||||
1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option.
|
|
||||||
This is the only type of captcha that works with Synapse.
|
|
||||||
1. Add the public hostname for your server, as set in `public_baseurl`
|
|
||||||
in `homeserver.yaml`, to the list of authorized domains. If you have not set
|
|
||||||
`public_baseurl`, use `server_name`.
|
|
||||||
1. Agree to the terms of service and submit.
|
|
||||||
1. Copy your site key and secret key and add them to your `homeserver.yaml`
|
|
||||||
configuration file
|
|
||||||
```
|
|
||||||
recaptcha_public_key: YOUR_SITE_KEY
|
recaptcha_public_key: YOUR_SITE_KEY
|
||||||
recaptcha_private_key: YOUR_SECRET_KEY
|
recaptcha_private_key: YOUR_SECRET_KEY
|
||||||
```
|
|
||||||
1. Enable the CAPTCHA for new registrations
|
In addition, you MUST enable captchas via:
|
||||||
```
|
|
||||||
enable_registration_captcha: true
|
enable_registration_captcha: true
|
||||||
```
|
|
||||||
1. Go to the settings page for the CAPTCHA you just created
|
|
||||||
1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the
|
|
||||||
captcha can be displayed in any client. If you do not disable this option then you
|
|
||||||
must specify the domains of every client that is allowed to display the CAPTCHA.
|
|
||||||
|
|
||||||
## Configuring IP used for auth
|
## Configuring IP used for auth
|
||||||
|
|
||||||
The reCAPTCHA API requires that the IP address of the user who solved the
|
The ReCaptcha API requires that the IP address of the user who solved the
|
||||||
CAPTCHA is sent. If the client is connecting through a proxy or load balancer,
|
captcha is sent. If the client is connecting through a proxy or load balancer,
|
||||||
it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
|
it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
|
||||||
IP address. This can be configured using the `x_forwarded` directive in the
|
IP address. This can be configured using the `x_forwarded` directive in the
|
||||||
listeners section of the `homeserver.yaml` configuration file.
|
listeners section of the homeserver.yaml configuration file.
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ upgraded, however it may be of use to those with old installs returning to the
|
|||||||
project.
|
project.
|
||||||
|
|
||||||
If you are setting up a server from scratch you almost certainly should look at
|
If you are setting up a server from scratch you almost certainly should look at
|
||||||
the [installation guide](setup/installation.md) instead.
|
the [installation guide](../INSTALL.md) instead.
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
|
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
|
||||||
@@ -101,6 +101,15 @@ In this case, your `server_name` points to the host where your Synapse is
|
|||||||
running. There is no need to create a `.well-known` URI or an SRV record, but
|
running. There is no need to create a `.well-known` URI or an SRV record, but
|
||||||
you will need to give Synapse a valid, signed, certificate.
|
you will need to give Synapse a valid, signed, certificate.
|
||||||
|
|
||||||
|
The easiest way to do that is with Synapse's built-in ACME (Let's Encrypt)
|
||||||
|
support. Full details are in [ACME.md](./ACME.md) but, in a nutshell:
|
||||||
|
|
||||||
|
1. Allow Synapse to listen on port 80 with `authbind`, or forward it from a
|
||||||
|
reverse proxy.
|
||||||
|
2. Enable acme support in `homeserver.yaml`.
|
||||||
|
3. Move your old certificates out of the way.
|
||||||
|
4. Restart Synapse.
|
||||||
|
|
||||||
### If you do have an SRV record currently
|
### If you do have an SRV record currently
|
||||||
|
|
||||||
If you are using an SRV record, your matrix domain (`server_name`) may not
|
If you are using an SRV record, your matrix domain (`server_name`) may not
|
||||||
@@ -121,9 +130,15 @@ In this situation, you have three choices for how to proceed:
|
|||||||
#### Option 1: give Synapse a certificate for your matrix domain
|
#### Option 1: give Synapse a certificate for your matrix domain
|
||||||
|
|
||||||
Synapse 1.0 will expect your server to present a TLS certificate for your
|
Synapse 1.0 will expect your server to present a TLS certificate for your
|
||||||
`server_name` (`example.com` in the above example). You can achieve this by acquiring a
|
`server_name` (`example.com` in the above example). You can achieve this by
|
||||||
certificate for the `server_name` yourself (for example, using `certbot`), and giving it
|
doing one of the following:
|
||||||
and the key to Synapse via `tls_certificate_path` and `tls_private_key_path`.
|
|
||||||
|
* Acquire a certificate for the `server_name` yourself (for example, using
|
||||||
|
`certbot`), and give it and the key to Synapse via `tls_certificate_path`
|
||||||
|
and `tls_private_key_path`, or:
|
||||||
|
|
||||||
|
* Use Synapse's [ACME support](./ACME.md), and forward port 80 on the
|
||||||
|
`server_name` domain to your Synapse instance.
|
||||||
|
|
||||||
#### Option 2: run Synapse behind a reverse proxy
|
#### Option 2: run Synapse behind a reverse proxy
|
||||||
|
|
||||||
@@ -132,7 +147,7 @@ your domain, you can simply route all traffic through the reverse proxy by
|
|||||||
updating the SRV record appropriately (or removing it, if the proxy listens on
|
updating the SRV record appropriately (or removing it, if the proxy listens on
|
||||||
8448).
|
8448).
|
||||||
|
|
||||||
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
|
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
|
||||||
reverse proxy.
|
reverse proxy.
|
||||||
|
|
||||||
#### Option 3: add a .well-known file to delegate your matrix traffic
|
#### Option 3: add a .well-known file to delegate your matrix traffic
|
||||||
@@ -146,9 +161,10 @@ You can do this with a `.well-known` file as follows:
|
|||||||
with Synapse 0.34 and earlier.
|
with Synapse 0.34 and earlier.
|
||||||
|
|
||||||
2. Give Synapse a certificate corresponding to the target domain
|
2. Give Synapse a certificate corresponding to the target domain
|
||||||
(`customer.example.net` in the above example). You can do this by acquire a
|
(`customer.example.net` in the above example). You can either use Synapse's
|
||||||
certificate for the target domain and giving it to Synapse via `tls_certificate_path`
|
built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
|
||||||
and `tls_private_key_path`.
|
the `acme` section), or acquire a certificate yourself and give it to
|
||||||
|
Synapse via `tls_certificate_path` and `tls_private_key_path`.
|
||||||
|
|
||||||
3. Restart Synapse to ensure the new certificate is loaded.
|
3. Restart Synapse to ensure the new certificate is loaded.
|
||||||
|
|
||||||
@@ -303,7 +319,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
|
|||||||
find it easier to direct federation traffic to a reverse proxy and manage their
|
find it easier to direct federation traffic to a reverse proxy and manage their
|
||||||
own TLS certificates, and this is a supported configuration.
|
own TLS certificates, and this is a supported configuration.
|
||||||
|
|
||||||
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
|
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
|
||||||
reverse proxy.
|
reverse proxy.
|
||||||
|
|
||||||
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
|
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
|
||||||
|
|||||||
@@ -1,72 +1,7 @@
|
|||||||
# Synapse Documentation
|
# Synapse Documentation
|
||||||
|
|
||||||
**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).**
|
This directory contains documentation specific to the `synapse` homeserver.
|
||||||
Please update any links to point to the new website instead.
|
|
||||||
|
|
||||||
## About
|
All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc)
|
||||||
|
|
||||||
This directory currently holds a series of markdown files documenting how to install, use
|
(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)
|
||||||
and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
|
|
||||||
from this repository, but it is recommended to instead browse through the
|
|
||||||
[website](https://matrix-org.github.io/synapse) for easier discoverability.
|
|
||||||
|
|
||||||
## Adding to the documentation
|
|
||||||
|
|
||||||
Most of the documentation currently exists as top-level files, as when organising them into
|
|
||||||
a structured website, these files were kept in place so that existing links would not break.
|
|
||||||
The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development`
|
|
||||||
etc. **All new documentation files should be placed in structured folders.** For example:
|
|
||||||
|
|
||||||
To create a new user-facing documentation page about a new Single Sign-On protocol named
|
|
||||||
"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md".
|
|
||||||
This file might fit into the documentation structure at:
|
|
||||||
|
|
||||||
- Usage
|
|
||||||
- Configuration
|
|
||||||
- User Authentication
|
|
||||||
- Single Sign-On
|
|
||||||
- **My Cool Protocol**
|
|
||||||
|
|
||||||
Given that, one would place the new file under
|
|
||||||
`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`.
|
|
||||||
|
|
||||||
Note that the structure of the documentation (and thus the left sidebar on the website) is determined
|
|
||||||
by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new
|
|
||||||
line linking to the new documentation file:
|
|
||||||
|
|
||||||
```markdown
|
|
||||||
- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Building the documentation
|
|
||||||
|
|
||||||
The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the
|
|
||||||
documentation is determined by the structure of [SUMMARY.md](SUMMARY.md).
|
|
||||||
|
|
||||||
First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**,
|
|
||||||
build the documentation with:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mdbook build
|
|
||||||
```
|
|
||||||
|
|
||||||
The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
|
|
||||||
browse the book by opening `book/index.html` in a web browser.
|
|
||||||
|
|
||||||
You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mdbook serve
|
|
||||||
```
|
|
||||||
|
|
||||||
The URL at which the docs can be viewed at will be logged.
|
|
||||||
|
|
||||||
## Configuration and theming
|
|
||||||
|
|
||||||
The look and behaviour of the website is configured by the [book.toml](../book.toml) file
|
|
||||||
at the root of the repository. See
|
|
||||||
[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html)
|
|
||||||
for available options.
|
|
||||||
|
|
||||||
The site can be themed and additionally extended with extra UI and features. See
|
|
||||||
[website_files/README.md](website_files/README.md) for details.
|
|
||||||
|
|||||||
@@ -1,89 +0,0 @@
|
|||||||
# Summary
|
|
||||||
|
|
||||||
# Introduction
|
|
||||||
- [Welcome and Overview](welcome_and_overview.md)
|
|
||||||
|
|
||||||
# Setup
|
|
||||||
- [Installation](setup/installation.md)
|
|
||||||
- [Using Postgres](postgres.md)
|
|
||||||
- [Configuring a Reverse Proxy](reverse_proxy.md)
|
|
||||||
- [Configuring a Turn Server](turn-howto.md)
|
|
||||||
- [Delegation](delegate.md)
|
|
||||||
|
|
||||||
# Upgrading
|
|
||||||
- [Upgrading between Synapse Versions](upgrade.md)
|
|
||||||
- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
|
|
||||||
|
|
||||||
# Usage
|
|
||||||
- [Federation](federate.md)
|
|
||||||
- [Configuration](usage/configuration/README.md)
|
|
||||||
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
|
|
||||||
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
|
|
||||||
- [Structured Logging](structured_logging.md)
|
|
||||||
- [User Authentication](usage/configuration/user_authentication/README.md)
|
|
||||||
- [Single-Sign On]()
|
|
||||||
- [OpenID Connect](openid.md)
|
|
||||||
- [SAML]()
|
|
||||||
- [CAS]()
|
|
||||||
- [SSO Mapping Providers](sso_mapping_providers.md)
|
|
||||||
- [Password Auth Providers](password_auth_providers.md)
|
|
||||||
- [JSON Web Tokens](jwt.md)
|
|
||||||
- [Registration Captcha](CAPTCHA_SETUP.md)
|
|
||||||
- [Application Services](application_services.md)
|
|
||||||
- [Server Notices](server_notices.md)
|
|
||||||
- [Consent Tracking](consent_tracking.md)
|
|
||||||
- [URL Previews](url_previews.md)
|
|
||||||
- [User Directory](user_directory.md)
|
|
||||||
- [Message Retention Policies](message_retention_policies.md)
|
|
||||||
- [Pluggable Modules](modules.md)
|
|
||||||
- [Third Party Rules]()
|
|
||||||
- [Spam Checker](spam_checker.md)
|
|
||||||
- [Presence Router](presence_router_module.md)
|
|
||||||
- [Media Storage Providers]()
|
|
||||||
- [Workers](workers.md)
|
|
||||||
- [Using `synctl` with Workers](synctl_workers.md)
|
|
||||||
- [Systemd](systemd-with-workers/README.md)
|
|
||||||
- [Administration](usage/administration/README.md)
|
|
||||||
- [Admin API](usage/administration/admin_api/README.md)
|
|
||||||
- [Account Validity](admin_api/account_validity.md)
|
|
||||||
- [Delete Group](admin_api/delete_group.md)
|
|
||||||
- [Event Reports](admin_api/event_reports.md)
|
|
||||||
- [Media](admin_api/media_admin_api.md)
|
|
||||||
- [Purge History](admin_api/purge_history_api.md)
|
|
||||||
- [Purge Rooms](admin_api/purge_room.md)
|
|
||||||
- [Register Users](admin_api/register_api.md)
|
|
||||||
- [Manipulate Room Membership](admin_api/room_membership.md)
|
|
||||||
- [Rooms](admin_api/rooms.md)
|
|
||||||
- [Server Notices](admin_api/server_notices.md)
|
|
||||||
- [Shutdown Room](admin_api/shutdown_room.md)
|
|
||||||
- [Statistics](admin_api/statistics.md)
|
|
||||||
- [Users](admin_api/user_admin_api.md)
|
|
||||||
- [Server Version](admin_api/version_api.md)
|
|
||||||
- [Manhole](manhole.md)
|
|
||||||
- [Monitoring](metrics-howto.md)
|
|
||||||
- [Request log format](usage/administration/request_log.md)
|
|
||||||
- [Scripts]()
|
|
||||||
|
|
||||||
# Development
|
|
||||||
- [Contributing Guide](development/contributing_guide.md)
|
|
||||||
- [Code Style](code_style.md)
|
|
||||||
- [Git Usage](dev/git.md)
|
|
||||||
- [Testing]()
|
|
||||||
- [OpenTracing](opentracing.md)
|
|
||||||
- [Database Schemas](development/database_schema.md)
|
|
||||||
- [Synapse Architecture]()
|
|
||||||
- [Log Contexts](log_contexts.md)
|
|
||||||
- [Replication](replication.md)
|
|
||||||
- [TCP Replication](tcp_replication.md)
|
|
||||||
- [Internal Documentation](development/internal_documentation/README.md)
|
|
||||||
- [Single Sign-On]()
|
|
||||||
- [SAML](dev/saml.md)
|
|
||||||
- [CAS](dev/cas.md)
|
|
||||||
- [State Resolution]()
|
|
||||||
- [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
|
|
||||||
- [Media Repository](media_repository.md)
|
|
||||||
- [Room and User Statistics](room_and_user_statistics.md)
|
|
||||||
- [Scripts]()
|
|
||||||
|
|
||||||
# Other
|
|
||||||
- [Dependency Deprecation Policy](deprecation_policy.md)
|
|
||||||
@@ -1,14 +1,28 @@
|
|||||||
Admin APIs
|
Admin APIs
|
||||||
==========
|
==========
|
||||||
|
|
||||||
**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
|
|
||||||
See `docs/README.md <../README.md>`_ for more information.
|
|
||||||
|
|
||||||
**Please update links to point to the website instead.** Existing files in this directory
|
|
||||||
are preserved to maintain historical links, but may be moved in the future.
|
|
||||||
|
|
||||||
This directory includes documentation for the various synapse specific admin
|
This directory includes documentation for the various synapse specific admin
|
||||||
APIs available. Updates to the existing Admin API documentation should still
|
APIs available.
|
||||||
be made to these files, but any new documentation files should instead be placed under
|
|
||||||
`docs/usage/administration/admin_api <../usage/administration/admin_api>`_.
|
|
||||||
|
|
||||||
|
Authenticating as a server admin
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
Many of the API calls in the admin api will require an `access_token` for a
|
||||||
|
server admin. (Note that a server admin is distinct from a room admin.)
|
||||||
|
|
||||||
|
A user can be marked as a server admin by updating the database directly, e.g.:
|
||||||
|
|
||||||
|
.. code-block:: sql
|
||||||
|
|
||||||
|
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
|
||||||
|
|
||||||
|
A new server admin user can also be created using the
|
||||||
|
``register_new_matrix_user`` script.
|
||||||
|
|
||||||
|
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
|
||||||
|
|
||||||
|
Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
|
||||||
|
|
||||||
|
``curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>``
|
||||||
|
|
||||||
|
Fore more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens>`_.
|
||||||
|
|||||||
@@ -1,42 +0,0 @@
|
|||||||
# Account validity API
|
|
||||||
|
|
||||||
This API allows a server administrator to manage the validity of an account. To
|
|
||||||
use it, you must enable the account validity feature (under
|
|
||||||
`account_validity`) in Synapse's configuration.
|
|
||||||
|
|
||||||
## Renew account
|
|
||||||
|
|
||||||
This API extends the validity of an account by as much time as configured in the
|
|
||||||
`period` parameter from the `account_validity` configuration.
|
|
||||||
|
|
||||||
The API is:
|
|
||||||
|
|
||||||
```
|
|
||||||
POST /_synapse/admin/v1/account_validity/validity
|
|
||||||
```
|
|
||||||
|
|
||||||
with the following body:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"user_id": "<user ID for the account to renew>",
|
|
||||||
"expiration_ts": 0,
|
|
||||||
"enable_renewal_emails": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
`expiration_ts` is an optional parameter and overrides the expiration date,
|
|
||||||
which otherwise defaults to now + validity period.
|
|
||||||
|
|
||||||
`enable_renewal_emails` is also an optional parameter and enables/disables
|
|
||||||
sending renewal emails to the user. Defaults to true.
|
|
||||||
|
|
||||||
The API returns with the new expiration date for this account, as a timestamp in
|
|
||||||
milliseconds since epoch:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"expiration_ts": 0
|
|
||||||
}
|
|
||||||
```
|
|
||||||
42
docs/admin_api/account_validity.rst
Normal file
42
docs/admin_api/account_validity.rst
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
Account validity API
|
||||||
|
====================
|
||||||
|
|
||||||
|
This API allows a server administrator to manage the validity of an account. To
|
||||||
|
use it, you must enable the account validity feature (under
|
||||||
|
``account_validity``) in Synapse's configuration.
|
||||||
|
|
||||||
|
Renew account
|
||||||
|
-------------
|
||||||
|
|
||||||
|
This API extends the validity of an account by as much time as configured in the
|
||||||
|
``period`` parameter from the ``account_validity`` configuration.
|
||||||
|
|
||||||
|
The API is::
|
||||||
|
|
||||||
|
POST /_synapse/admin/v1/account_validity/validity
|
||||||
|
|
||||||
|
with the following body:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"user_id": "<user ID for the account to renew>",
|
||||||
|
"expiration_ts": 0,
|
||||||
|
"enable_renewal_emails": true
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
``expiration_ts`` is an optional parameter and overrides the expiration date,
|
||||||
|
which otherwise defaults to now + validity period.
|
||||||
|
|
||||||
|
``enable_renewal_emails`` is also an optional parameter and enables/disables
|
||||||
|
sending renewal emails to the user. Defaults to true.
|
||||||
|
|
||||||
|
The API returns with the new expiration date for this account, as a timestamp in
|
||||||
|
milliseconds since epoch:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"expiration_ts": 0
|
||||||
|
}
|
||||||
@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
|
|||||||
```
|
```
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ The api is:
|
|||||||
GET /_synapse/admin/v1/event_reports?from=0&limit=10
|
GET /_synapse/admin/v1/event_reports?from=0&limit=10
|
||||||
```
|
```
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
It returns a JSON body like the following:
|
It returns a JSON body like the following:
|
||||||
|
|
||||||
@@ -75,9 +75,9 @@ The following fields are returned in the JSON response body:
|
|||||||
* `name`: string - The name of the room.
|
* `name`: string - The name of the room.
|
||||||
* `event_id`: string - The ID of the reported event.
|
* `event_id`: string - The ID of the reported event.
|
||||||
* `user_id`: string - This is the user who reported the event and wrote the reason.
|
* `user_id`: string - This is the user who reported the event and wrote the reason.
|
||||||
* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`.
|
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
|
||||||
* `score`: integer - Content is reported based upon a negative score, where -100 is
|
* `score`: integer - Content is reported based upon a negative score, where -100 is
|
||||||
"most offensive" and 0 is "inoffensive". May be `null`.
|
"most offensive" and 0 is "inoffensive".
|
||||||
* `sender`: string - This is the ID of the user who sent the original message/event that
|
* `sender`: string - This is the ID of the user who sent the original message/event that
|
||||||
was reported.
|
was reported.
|
||||||
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
|
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
|
||||||
@@ -95,7 +95,7 @@ The api is:
|
|||||||
GET /_synapse/admin/v1/event_reports/<report_id>
|
GET /_synapse/admin/v1/event_reports/<report_id>
|
||||||
```
|
```
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
It returns a JSON body like the following:
|
It returns a JSON body like the following:
|
||||||
|
|
||||||
|
|||||||
@@ -1,24 +1,16 @@
|
|||||||
# Contents
|
# Contents
|
||||||
- [Querying media](#querying-media)
|
- [List all media in a room](#list-all-media-in-a-room)
|
||||||
* [List all media in a room](#list-all-media-in-a-room)
|
|
||||||
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
|
|
||||||
- [Quarantine media](#quarantine-media)
|
- [Quarantine media](#quarantine-media)
|
||||||
* [Quarantining media by ID](#quarantining-media-by-id)
|
* [Quarantining media by ID](#quarantining-media-by-id)
|
||||||
* [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
|
|
||||||
* [Quarantining media in a room](#quarantining-media-in-a-room)
|
* [Quarantining media in a room](#quarantining-media-in-a-room)
|
||||||
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
|
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
|
||||||
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
|
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
|
||||||
* [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
|
|
||||||
- [Delete local media](#delete-local-media)
|
- [Delete local media](#delete-local-media)
|
||||||
* [Delete a specific local media](#delete-a-specific-local-media)
|
* [Delete a specific local media](#delete-a-specific-local-media)
|
||||||
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
|
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
|
||||||
- [Purge Remote Media API](#purge-remote-media-api)
|
- [Purge Remote Media API](#purge-remote-media-api)
|
||||||
|
|
||||||
# Querying media
|
# List all media in a room
|
||||||
|
|
||||||
These APIs allow extracting media information from the homeserver.
|
|
||||||
|
|
||||||
## List all media in a room
|
|
||||||
|
|
||||||
This API gets a list of known media in a room.
|
This API gets a list of known media in a room.
|
||||||
However, it only shows media from unencrypted events or rooms.
|
However, it only shows media from unencrypted events or rooms.
|
||||||
@@ -28,7 +20,7 @@ The API is:
|
|||||||
GET /_synapse/admin/v1/room/<room_id>/media
|
GET /_synapse/admin/v1/room/<room_id>/media
|
||||||
```
|
```
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
The API returns a JSON body like the following:
|
The API returns a JSON body like the following:
|
||||||
```json
|
```json
|
||||||
@@ -44,12 +36,6 @@ The API returns a JSON body like the following:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## List all media uploaded by a user
|
|
||||||
|
|
||||||
Listing all media that has been uploaded by a local user can be achieved through
|
|
||||||
the use of the [List media of a user](user_admin_api.md#list-media-of-a-user)
|
|
||||||
Admin API.
|
|
||||||
|
|
||||||
# Quarantine media
|
# Quarantine media
|
||||||
|
|
||||||
Quarantining media means that it is marked as inaccessible by users. It applies
|
Quarantining media means that it is marked as inaccessible by users. It applies
|
||||||
@@ -78,27 +64,6 @@ Response:
|
|||||||
{}
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Remove media from quarantine by ID
|
|
||||||
|
|
||||||
This API removes a single piece of local or remote media from quarantine.
|
|
||||||
|
|
||||||
Request:
|
|
||||||
|
|
||||||
```
|
|
||||||
POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
|
|
||||||
|
|
||||||
{}
|
|
||||||
```
|
|
||||||
|
|
||||||
Where `server_name` is in the form of `example.org`, and `media_id` is in the
|
|
||||||
form of `abcdefg12345...`.
|
|
||||||
|
|
||||||
Response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Quarantining media in a room
|
## Quarantining media in a room
|
||||||
|
|
||||||
This API quarantines all local and remote media in a room.
|
This API quarantines all local and remote media in a room.
|
||||||
@@ -182,26 +147,6 @@ Response:
|
|||||||
{}
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Unprotecting media from being quarantined
|
|
||||||
|
|
||||||
This API reverts the protection of a media.
|
|
||||||
|
|
||||||
Request:
|
|
||||||
|
|
||||||
```
|
|
||||||
POST /_synapse/admin/v1/media/unprotect/<media_id>
|
|
||||||
|
|
||||||
{}
|
|
||||||
```
|
|
||||||
|
|
||||||
Where `media_id` is in the form of `abcdefg12345...`.
|
|
||||||
|
|
||||||
Response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Delete local media
|
# Delete local media
|
||||||
This API deletes the *local* media from the disk of your own server.
|
This API deletes the *local* media from the disk of your own server.
|
||||||
This includes any local thumbnails and copies of media downloaded from
|
This includes any local thumbnails and copies of media downloaded from
|
||||||
@@ -257,7 +202,7 @@ URL Parameters
|
|||||||
* `server_name`: string - The name of your local server (e.g `matrix.org`).
|
* `server_name`: string - The name of your local server (e.g `matrix.org`).
|
||||||
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
|
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
|
||||||
Files that were last used before this timestamp will be deleted. It is the timestamp of
|
Files that were last used before this timestamp will be deleted. It is the timestamp of
|
||||||
last access and not the timestamp creation.
|
last access and not the timestamp creation.
|
||||||
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
|
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
|
||||||
Files that are larger will be deleted. Defaults to `0`.
|
Files that are larger will be deleted. Defaults to `0`.
|
||||||
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
|
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
|
||||||
@@ -311,7 +256,7 @@ The following fields are returned in the JSON response body:
|
|||||||
* `deleted`: integer - The number of media items successfully deleted
|
* `deleted`: integer - The number of media items successfully deleted
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
If the user re-requests purged remote media, synapse will re-request the media
|
If the user re-requests purged remote media, synapse will re-request the media
|
||||||
from the originating server.
|
from the originating server.
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
# Purge History API
|
Purge History API
|
||||||
|
=================
|
||||||
|
|
||||||
The purge history API allows server admins to purge historic events from their
|
The purge history API allows server admins to purge historic events from their
|
||||||
database, reclaiming disk space.
|
database, reclaiming disk space.
|
||||||
@@ -12,12 +13,10 @@ delete the last message in a room.
|
|||||||
|
|
||||||
The API is:
|
The API is:
|
||||||
|
|
||||||
```
|
``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
|
||||||
POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
|
|
||||||
```
|
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||||
server admin: [Admin API](../usage/administration/admin_api)
|
server admin: see `README.rst <README.rst>`_.
|
||||||
|
|
||||||
By default, events sent by local users are not deleted, as they may represent
|
By default, events sent by local users are not deleted, as they may represent
|
||||||
the only copies of this content in existence. (Events sent by remote users are
|
the only copies of this content in existence. (Events sent by remote users are
|
||||||
@@ -25,54 +24,54 @@ deleted.)
|
|||||||
|
|
||||||
Room state data (such as joins, leaves, topic) is always preserved.
|
Room state data (such as joins, leaves, topic) is always preserved.
|
||||||
|
|
||||||
To delete local message events as well, set `delete_local_events` in the body:
|
To delete local message events as well, set ``delete_local_events`` in the body:
|
||||||
|
|
||||||
```
|
.. code:: json
|
||||||
{
|
|
||||||
"delete_local_events": true
|
{
|
||||||
}
|
"delete_local_events": true
|
||||||
```
|
}
|
||||||
|
|
||||||
The caller must specify the point in the room to purge up to. This can be
|
The caller must specify the point in the room to purge up to. This can be
|
||||||
specified by including an event_id in the URI, or by setting a
|
specified by including an event_id in the URI, or by setting a
|
||||||
`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
|
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
|
||||||
id is given, that event (and others at the same graph depth) will be retained.
|
id is given, that event (and others at the same graph depth) will be retained.
|
||||||
If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
|
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
|
||||||
in milliseconds.
|
in milliseconds.
|
||||||
|
|
||||||
The API starts the purge running, and returns immediately with a JSON body with
|
The API starts the purge running, and returns immediately with a JSON body with
|
||||||
a purge id:
|
a purge id:
|
||||||
|
|
||||||
```json
|
.. code:: json
|
||||||
{
|
|
||||||
"purge_id": "<opaque id>"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Purge status query
|
{
|
||||||
|
"purge_id": "<opaque id>"
|
||||||
|
}
|
||||||
|
|
||||||
|
Purge status query
|
||||||
|
------------------
|
||||||
|
|
||||||
It is possible to poll for updates on recent purges with a second API;
|
It is possible to poll for updates on recent purges with a second API;
|
||||||
|
|
||||||
```
|
``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
|
||||||
GET /_synapse/admin/v1/purge_history_status/<purge_id>
|
|
||||||
```
|
|
||||||
|
|
||||||
Again, you will need to authenticate by providing an `access_token` for a
|
Again, you will need to authenticate by providing an ``access_token`` for a
|
||||||
server admin.
|
server admin.
|
||||||
|
|
||||||
This API returns a JSON body like the following:
|
This API returns a JSON body like the following:
|
||||||
|
|
||||||
```json
|
.. code:: json
|
||||||
{
|
|
||||||
"status": "active"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The status will be one of `active`, `complete`, or `failed`.
|
{
|
||||||
|
"status": "active"
|
||||||
|
}
|
||||||
|
|
||||||
## Reclaim disk space (Postgres)
|
The status will be one of ``active``, ``complete``, or ``failed``.
|
||||||
|
|
||||||
|
Reclaim disk space (Postgres)
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
To reclaim the disk space and return it to the operating system, you need to run
|
To reclaim the disk space and return it to the operating system, you need to run
|
||||||
`VACUUM FULL;` on the database.
|
`VACUUM FULL;` on the database.
|
||||||
|
|
||||||
<https://www.postgresql.org/docs/current/sql-vacuum.html>
|
https://www.postgresql.org/docs/current/sql-vacuum.html
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
# Shared-Secret Registration
|
|
||||||
|
|
||||||
This API allows for the creation of users in an administrative and
|
|
||||||
non-interactive way. This is generally used for bootstrapping a Synapse
|
|
||||||
instance with administrator accounts.
|
|
||||||
|
|
||||||
To authenticate yourself to the server, you will need both the shared secret
|
|
||||||
(`registration_shared_secret` in the homeserver configuration), and a
|
|
||||||
one-time nonce. If the registration shared secret is not configured, this API
|
|
||||||
is not enabled.
|
|
||||||
|
|
||||||
To fetch the nonce, you need to request one from the API:
|
|
||||||
|
|
||||||
```
|
|
||||||
> GET /_synapse/admin/v1/register
|
|
||||||
|
|
||||||
< {"nonce": "thisisanonce"}
|
|
||||||
```
|
|
||||||
|
|
||||||
Once you have the nonce, you can make a `POST` to the same URL with a JSON
|
|
||||||
body containing the nonce, username, password, whether they are an admin
|
|
||||||
(optional, False by default), and a HMAC digest of the content. Also you can
|
|
||||||
set the displayname (optional, `username` by default).
|
|
||||||
|
|
||||||
As an example:
|
|
||||||
|
|
||||||
```
|
|
||||||
> POST /_synapse/admin/v1/register
|
|
||||||
> {
|
|
||||||
"nonce": "thisisanonce",
|
|
||||||
"username": "pepper_roni",
|
|
||||||
"displayname": "Pepper Roni",
|
|
||||||
"password": "pizza",
|
|
||||||
"admin": true,
|
|
||||||
"mac": "mac_digest_here"
|
|
||||||
}
|
|
||||||
|
|
||||||
< {
|
|
||||||
"access_token": "token_here",
|
|
||||||
"user_id": "@pepper_roni:localhost",
|
|
||||||
"home_server": "test",
|
|
||||||
"device_id": "device_id_here"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
|
|
||||||
the shared secret and the content being the nonce, user, password, either the
|
|
||||||
string "admin" or "notadmin", and optionally the user_type
|
|
||||||
each separated by NULs. For an example of generation in Python:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import hmac, hashlib
|
|
||||||
|
|
||||||
def generate_mac(nonce, user, password, admin=False, user_type=None):
|
|
||||||
|
|
||||||
mac = hmac.new(
|
|
||||||
key=shared_secret,
|
|
||||||
digestmod=hashlib.sha1,
|
|
||||||
)
|
|
||||||
|
|
||||||
mac.update(nonce.encode('utf8'))
|
|
||||||
mac.update(b"\x00")
|
|
||||||
mac.update(user.encode('utf8'))
|
|
||||||
mac.update(b"\x00")
|
|
||||||
mac.update(password.encode('utf8'))
|
|
||||||
mac.update(b"\x00")
|
|
||||||
mac.update(b"admin" if admin else b"notadmin")
|
|
||||||
if user_type:
|
|
||||||
mac.update(b"\x00")
|
|
||||||
mac.update(user_type.encode('utf8'))
|
|
||||||
|
|
||||||
return mac.hexdigest()
|
|
||||||
```
|
|
||||||
68
docs/admin_api/register_api.rst
Normal file
68
docs/admin_api/register_api.rst
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
Shared-Secret Registration
|
||||||
|
==========================
|
||||||
|
|
||||||
|
This API allows for the creation of users in an administrative and
|
||||||
|
non-interactive way. This is generally used for bootstrapping a Synapse
|
||||||
|
instance with administrator accounts.
|
||||||
|
|
||||||
|
To authenticate yourself to the server, you will need both the shared secret
|
||||||
|
(``registration_shared_secret`` in the homeserver configuration), and a
|
||||||
|
one-time nonce. If the registration shared secret is not configured, this API
|
||||||
|
is not enabled.
|
||||||
|
|
||||||
|
To fetch the nonce, you need to request one from the API::
|
||||||
|
|
||||||
|
> GET /_synapse/admin/v1/register
|
||||||
|
|
||||||
|
< {"nonce": "thisisanonce"}
|
||||||
|
|
||||||
|
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
|
||||||
|
body containing the nonce, username, password, whether they are an admin
|
||||||
|
(optional, False by default), and a HMAC digest of the content. Also you can
|
||||||
|
set the displayname (optional, ``username`` by default).
|
||||||
|
|
||||||
|
As an example::
|
||||||
|
|
||||||
|
> POST /_synapse/admin/v1/register
|
||||||
|
> {
|
||||||
|
"nonce": "thisisanonce",
|
||||||
|
"username": "pepper_roni",
|
||||||
|
"displayname": "Pepper Roni",
|
||||||
|
"password": "pizza",
|
||||||
|
"admin": true,
|
||||||
|
"mac": "mac_digest_here"
|
||||||
|
}
|
||||||
|
|
||||||
|
< {
|
||||||
|
"access_token": "token_here",
|
||||||
|
"user_id": "@pepper_roni:localhost",
|
||||||
|
"home_server": "test",
|
||||||
|
"device_id": "device_id_here"
|
||||||
|
}
|
||||||
|
|
||||||
|
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
|
||||||
|
the shared secret and the content being the nonce, user, password, either the
|
||||||
|
string "admin" or "notadmin", and optionally the user_type
|
||||||
|
each separated by NULs. For an example of generation in Python::
|
||||||
|
|
||||||
|
import hmac, hashlib
|
||||||
|
|
||||||
|
def generate_mac(nonce, user, password, admin=False, user_type=None):
|
||||||
|
|
||||||
|
mac = hmac.new(
|
||||||
|
key=shared_secret,
|
||||||
|
digestmod=hashlib.sha1,
|
||||||
|
)
|
||||||
|
|
||||||
|
mac.update(nonce.encode('utf8'))
|
||||||
|
mac.update(b"\x00")
|
||||||
|
mac.update(user.encode('utf8'))
|
||||||
|
mac.update(b"\x00")
|
||||||
|
mac.update(password.encode('utf8'))
|
||||||
|
mac.update(b"\x00")
|
||||||
|
mac.update(b"admin" if admin else b"notadmin")
|
||||||
|
if user_type:
|
||||||
|
mac.update(b"\x00")
|
||||||
|
mac.update(user_type.encode('utf8'))
|
||||||
|
|
||||||
|
return mac.hexdigest()
|
||||||
@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
|
|||||||
```
|
```
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an `access_token` for a
|
To use it, you will need to authenticate by providing an `access_token` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
Response:
|
Response:
|
||||||
|
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
# Contents
|
# Contents
|
||||||
- [List Room API](#list-room-api)
|
- [List Room API](#list-room-api)
|
||||||
|
* [Parameters](#parameters)
|
||||||
|
* [Usage](#usage)
|
||||||
- [Room Details API](#room-details-api)
|
- [Room Details API](#room-details-api)
|
||||||
- [Room Members API](#room-members-api)
|
- [Room Members API](#room-members-api)
|
||||||
- [Room State API](#room-state-api)
|
|
||||||
- [Delete Room API](#delete-room-api)
|
- [Delete Room API](#delete-room-api)
|
||||||
|
* [Parameters](#parameters-1)
|
||||||
|
* [Response](#response)
|
||||||
* [Undoing room shutdowns](#undoing-room-shutdowns)
|
* [Undoing room shutdowns](#undoing-room-shutdowns)
|
||||||
- [Make Room Admin API](#make-room-admin-api)
|
- [Make Room Admin API](#make-room-admin-api)
|
||||||
- [Forward Extremities Admin API](#forward-extremities-admin-api)
|
|
||||||
- [Event Context API](#event-context-api)
|
|
||||||
|
|
||||||
# List Room API
|
# List Room API
|
||||||
|
|
||||||
@@ -15,7 +16,7 @@ The List Room admin API allows server admins to get a list of rooms on their
|
|||||||
server. There are various parameters available that allow for filtering and
|
server. There are various parameters available that allow for filtering and
|
||||||
sorting the returned list. This API supports pagination.
|
sorting the returned list. This API supports pagination.
|
||||||
|
|
||||||
**Parameters**
|
## Parameters
|
||||||
|
|
||||||
The following query parameters are available:
|
The following query parameters are available:
|
||||||
|
|
||||||
@@ -42,8 +43,6 @@ The following query parameters are available:
|
|||||||
* `search_term` - Filter rooms by their room name. Search term can be contained in any
|
* `search_term` - Filter rooms by their room name. Search term can be contained in any
|
||||||
part of the room name. Defaults to no filtering.
|
part of the room name. Defaults to no filtering.
|
||||||
|
|
||||||
**Response**
|
|
||||||
|
|
||||||
The following fields are possible in the JSON response body:
|
The following fields are possible in the JSON response body:
|
||||||
|
|
||||||
* `rooms` - An array of objects, each containing information about a room.
|
* `rooms` - An array of objects, each containing information about a room.
|
||||||
@@ -77,15 +76,17 @@ The following fields are possible in the JSON response body:
|
|||||||
Use `prev_batch` for the `from` value in the next request to
|
Use `prev_batch` for the `from` value in the next request to
|
||||||
get the "previous page" of results.
|
get the "previous page" of results.
|
||||||
|
|
||||||
The API is:
|
## Usage
|
||||||
|
|
||||||
A standard request with no filtering:
|
A standard request with no filtering:
|
||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms
|
GET /_synapse/admin/v1/rooms
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```jsonc
|
```jsonc
|
||||||
{
|
{
|
||||||
@@ -133,9 +134,11 @@ Filtering by room name:
|
|||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms?search_term=TWIM
|
GET /_synapse/admin/v1/rooms?search_term=TWIM
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -166,9 +169,11 @@ Paginating through a list of rooms:
|
|||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms?order_by=size
|
GET /_synapse/admin/v1/rooms?order_by=size
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```jsonc
|
```jsonc
|
||||||
{
|
{
|
||||||
@@ -220,9 +225,11 @@ parameter to the value of `next_token`.
|
|||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms?order_by=size&from=100
|
GET /_synapse/admin/v1/rooms?order_by=size&from=100
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```jsonc
|
```jsonc
|
||||||
{
|
{
|
||||||
@@ -294,13 +301,17 @@ The following fields are possible in the JSON response body:
|
|||||||
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
|
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
|
||||||
* `state_events` - Total number of state_events of a room. Complexity of the room.
|
* `state_events` - Total number of state_events of a room. Complexity of the room.
|
||||||
|
|
||||||
The API is:
|
## Usage
|
||||||
|
|
||||||
|
A standard request:
|
||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms/<room_id>
|
GET /_synapse/admin/v1/rooms/<room_id>
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -333,13 +344,17 @@ The response includes the following fields:
|
|||||||
* `members` - A list of all the members that are present in the room, represented by their ids.
|
* `members` - A list of all the members that are present in the room, represented by their ids.
|
||||||
* `total` - Total number of members in the room.
|
* `total` - Total number of members in the room.
|
||||||
|
|
||||||
The API is:
|
## Usage
|
||||||
|
|
||||||
|
A standard request:
|
||||||
|
|
||||||
```
|
```
|
||||||
GET /_synapse/admin/v1/rooms/<room_id>/members
|
GET /_synapse/admin/v1/rooms/<room_id>/members
|
||||||
|
|
||||||
|
{}
|
||||||
```
|
```
|
||||||
|
|
||||||
A response body like the following is returned:
|
Response:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -352,32 +367,6 @@ A response body like the following is returned:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
# Room State API
|
|
||||||
|
|
||||||
The Room State admin API allows server admins to get a list of all state events in a room.
|
|
||||||
|
|
||||||
The response includes the following fields:
|
|
||||||
|
|
||||||
* `state` - The current state of the room at the time of request.
|
|
||||||
|
|
||||||
The API is:
|
|
||||||
|
|
||||||
```
|
|
||||||
GET /_synapse/admin/v1/rooms/<room_id>/state
|
|
||||||
```
|
|
||||||
|
|
||||||
A response body like the following is returned:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"state": [
|
|
||||||
{"type": "m.room.create", "state_key": "", "etc": true},
|
|
||||||
{"type": "m.room.power_levels", "state_key": "", "etc": true},
|
|
||||||
{"type": "m.room.name", "state_key": "", "etc": true}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Delete Room API
|
# Delete Room API
|
||||||
|
|
||||||
The Delete Room admin API allows server admins to remove rooms from server
|
The Delete Room admin API allows server admins to remove rooms from server
|
||||||
@@ -406,11 +395,10 @@ the new room. Users on other servers will be unaffected.
|
|||||||
The API is:
|
The API is:
|
||||||
|
|
||||||
```
|
```
|
||||||
DELETE /_synapse/admin/v1/rooms/<room_id>
|
POST /_synapse/admin/v1/rooms/<room_id>/delete
|
||||||
```
|
```
|
||||||
|
|
||||||
with a body of:
|
with a body of:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"new_room_user_id": "@someuser:example.com",
|
"new_room_user_id": "@someuser:example.com",
|
||||||
@@ -422,7 +410,7 @@ with a body of:
|
|||||||
```
|
```
|
||||||
|
|
||||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||||
server admin: see [Admin API](../usage/administration/admin_api).
|
server admin: see [README.rst](README.rst).
|
||||||
|
|
||||||
A response body like the following is returned:
|
A response body like the following is returned:
|
||||||
|
|
||||||
@@ -440,7 +428,7 @@ A response body like the following is returned:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Parameters**
|
## Parameters
|
||||||
|
|
||||||
The following parameters should be set in the URL:
|
The following parameters should be set in the URL:
|
||||||
|
|
||||||
@@ -470,7 +458,7 @@ The following JSON body parameters are available:
|
|||||||
|
|
||||||
The JSON body must not be empty. The body must be at least `{}`.
|
The JSON body must not be empty. The body must be at least `{}`.
|
||||||
|
|
||||||
**Response**
|
## Response
|
||||||
|
|
||||||
The following fields are returned in the JSON response body:
|
The following fields are returned in the JSON response body:
|
||||||
|
|
||||||
@@ -508,15 +496,6 @@ You will have to manually handle, if you so choose, the following:
|
|||||||
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
|
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
|
||||||
* Removal of the Content Violation room if desired.
|
* Removal of the Content Violation room if desired.
|
||||||
|
|
||||||
## Deprecated endpoint
|
|
||||||
|
|
||||||
The previous deprecated API will be removed in a future release, it was:
|
|
||||||
|
|
||||||
```
|
|
||||||
POST /_synapse/admin/v1/rooms/<room_id>/delete
|
|
||||||
```
|
|
||||||
|
|
||||||
It behaves in the same way as the current endpoint, except for the path and the method.
|
|
||||||
|
|
||||||
# Make Room Admin API
|
# Make Room Admin API
|
||||||
|
|
||||||
@@ -527,178 +506,8 @@ By default the server admin (the caller) is granted power, but another user can
|
|||||||
optionally be specified, e.g.:
|
optionally be specified, e.g.:
|
||||||
|
|
||||||
```
|
```
|
||||||
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
|
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
|
||||||
{
|
|
||||||
"user_id": "@foo:example.com"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Forward Extremities Admin API
|
|
||||||
|
|
||||||
Enables querying and deleting forward extremities from rooms. When a lot of forward
|
|
||||||
extremities accumulate in a room, performance can become degraded. For details, see
|
|
||||||
[#1760](https://github.com/matrix-org/synapse/issues/1760).
|
|
||||||
|
|
||||||
## Check for forward extremities
|
|
||||||
|
|
||||||
To check the status of forward extremities for a room:
|
|
||||||
|
|
||||||
```
|
|
||||||
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
|
||||||
```
|
|
||||||
|
|
||||||
A response as follows will be returned:
|
|
||||||
|
|
||||||
```json
{
  "count": 1,
  "results": [
    {
      "event_id": "$M5SP266vsnxctfwFgFLNceaCo3ujhRtg_NiiHabcdefgh",
      "state_group": 439,
      "depth": 123,
      "received_ts": 1611263016761
    }
  ]
}
```
|
|
||||||
|
|
||||||
## Deleting forward extremities
|
|
||||||
|
|
||||||
**WARNING**: Please ensure you know what you're doing and have read
|
|
||||||
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
|
|
||||||
Under no situations should this API be executed as an automated maintenance task!
|
|
||||||
|
|
||||||
If a room has lots of forward extremities, the extra can be
|
|
||||||
deleted as follows:
|
|
||||||
|
|
||||||
```
|
|
||||||
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
|
||||||
```
|
|
||||||
|
|
||||||
A response as follows will be returned, indicating the number of forward
extremities that were deleted.
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"deleted": 1
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Event Context API
|
|
||||||
|
|
||||||
This API lets a client find the context of an event. This is designed primarily to investigate abuse reports.
|
|
||||||
|
|
||||||
```
|
|
||||||
GET /_synapse/admin/v1/rooms/<room_id>/context/<event_id>
|
|
||||||
```
|
|
||||||
|
|
||||||
This API mimics [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-rooms-roomid-context-eventid). Please refer to the link for all details on parameters and response.
|
|
||||||
|
|
||||||
Example response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"end": "t29-57_2_0_2",
|
|
||||||
"events_after": [
|
|
||||||
{
|
|
||||||
"content": {
|
|
||||||
"body": "This is an example text message",
|
|
||||||
"msgtype": "m.text",
|
|
||||||
"format": "org.matrix.custom.html",
|
|
||||||
"formatted_body": "<b>This is an example text message</b>"
|
|
||||||
},
|
|
||||||
"type": "m.room.message",
|
|
||||||
"event_id": "$143273582443PhrSn:example.org",
|
|
||||||
"room_id": "!636q39766251:example.com",
|
|
||||||
"sender": "@example:example.org",
|
|
||||||
"origin_server_ts": 1432735824653,
|
|
||||||
"unsigned": {
|
|
||||||
"age": 1234
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"event": {
|
|
||||||
"content": {
|
|
||||||
"body": "filename.jpg",
|
|
||||||
"info": {
|
|
||||||
"h": 398,
|
|
||||||
"w": 394,
|
|
||||||
"mimetype": "image/jpeg",
|
|
||||||
"size": 31037
|
|
||||||
},
|
|
||||||
"url": "mxc://example.org/JWEIFJgwEIhweiWJE",
|
|
||||||
"msgtype": "m.image"
|
|
||||||
},
|
|
||||||
"type": "m.room.message",
|
|
||||||
"event_id": "$f3h4d129462ha:example.com",
|
|
||||||
"room_id": "!636q39766251:example.com",
|
|
||||||
"sender": "@example:example.org",
|
|
||||||
"origin_server_ts": 1432735824653,
|
|
||||||
"unsigned": {
|
|
||||||
"age": 1234
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"events_before": [
|
|
||||||
{
|
|
||||||
"content": {
|
|
||||||
"body": "something-important.doc",
|
|
||||||
"filename": "something-important.doc",
|
|
||||||
"info": {
|
|
||||||
"mimetype": "application/msword",
|
|
||||||
"size": 46144
|
|
||||||
},
|
|
||||||
"msgtype": "m.file",
|
|
||||||
"url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe"
|
|
||||||
},
|
|
||||||
"type": "m.room.message",
|
|
||||||
"event_id": "$143273582443PhrSn:example.org",
|
|
||||||
"room_id": "!636q39766251:example.com",
|
|
||||||
"sender": "@example:example.org",
|
|
||||||
"origin_server_ts": 1432735824653,
|
|
||||||
"unsigned": {
|
|
||||||
"age": 1234
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"start": "t27-54_2_0_2",
|
|
||||||
"state": [
|
|
||||||
{
|
|
||||||
"content": {
|
|
||||||
"creator": "@example:example.org",
|
|
||||||
"room_version": "1",
|
|
||||||
"m.federate": true,
|
|
||||||
"predecessor": {
|
|
||||||
"event_id": "$something:example.org",
|
|
||||||
"room_id": "!oldroom:example.org"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"type": "m.room.create",
|
|
||||||
"event_id": "$143273582443PhrSn:example.org",
|
|
||||||
"room_id": "!636q39766251:example.com",
|
|
||||||
"sender": "@example:example.org",
|
|
||||||
"origin_server_ts": 1432735824653,
|
|
||||||
"unsigned": {
|
|
||||||
"age": 1234
|
|
||||||
},
|
|
||||||
"state_key": ""
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"content": {
|
|
||||||
"membership": "join",
|
|
||||||
"avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF",
|
|
||||||
"displayname": "Alice Margatroid"
|
|
||||||
},
|
|
||||||
"type": "m.room.member",
|
|
||||||
"event_id": "$143273582443PhrSn:example.org",
|
|
||||||
"room_id": "!636q39766251:example.com",
|
|
||||||
"sender": "@example:example.org",
|
|
||||||
"origin_server_ts": 1432735824653,
|
|
||||||
"unsigned": {
|
|
||||||
"age": 1234
|
|
||||||
},
|
|
||||||
"state_key": "@alice:example.org"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -45,4 +45,4 @@ Once the notice has been sent, the API will return the following response:
|
|||||||
```
|
```
|
||||||
|
|
||||||
Note that server notices must be enabled in `homeserver.yaml` before this API
|
Note that server notices must be enabled in `homeserver.yaml` before this API
|
||||||
can be used. See [the server notices documentation](../server_notices.md) for more information.
|
can be used. See [server_notices.md](../server_notices.md) for more information.
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user