Mirror of https://github.com/element-hq/synapse.git

Compare commits: v0.17.0-rc...initial_sy (1 commit, a8dbb624b3)
.gitignore: 4 changes
@@ -42,7 +42,3 @@ build/

localhost-800*/
static/client/register/register_config.js
.tox

env/
*.config
AUTHORS.rst: 62 changes
@@ -1,62 +0,0 @@
Erik Johnston <erik at matrix.org>
 * HS core
 * Federation API impl

Mark Haines <mark at matrix.org>
 * HS core
 * Crypto
 * Content repository
 * CS v2 API impl

Kegan Dougal <kegan at matrix.org>
 * HS core
 * CS v1 API impl
 * AS API impl

Paul "LeoNerd" Evans <paul at matrix.org>
 * HS core
 * Presence
 * Typing Notifications
 * Performance metrics and caching layer

Dave Baker <dave at matrix.org>
 * Push notifications
 * Auth CS v2 impl

Matthew Hodgson <matthew at matrix.org>
 * General doc & housekeeping
 * Vertobot/vertobridge matrix<->verto PoC

Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

Brabo <brabo at riseup.net>
 * Installation instruction fixes

Ivan Shapovalov <intelfx100 at gmail.com>
 * contrib/systemd: a sample systemd unit file and a logger configuration

Eric Myhre <hash at exultant.us>
 * Fix bug where ``media_store_path`` config option was ignored by v0 content
   repository API.

Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
 * Add SAML2 support for registration and login.

Steven Hammerton <steven.hammerton at openmarket.com>
 * Add CAS support for registration and login.

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication
CHANGES.rst: 841 changes
@@ -1,838 +1,3 @@
Changes in synapse v0.17.0-rc2 (2016-08-02)
===========================================

Changes:

* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
* Add some basic admin API docs (PR #963)


Bug fixes:

* Send the correct host header when fetching keys (PR #941)
* Fix joining a room that has missing auth events (PR #964)
* Fix various push bugs (PR #966, #970)
* Fix adding emails on registration (PR #968)


Changes in synapse v0.17.0-rc1 (2016-07-28)
===========================================

This release changes the LDAP configuration format in a backwards incompatible
way, see PR #843 for details.

The 0.17 release will contain significant security bug fixes regarding
authenticating events received over federation.


Features:

* Add purge_media_cache admin API (PR #902)
* Add deactivate account admin API (PR #903)
* Add optional pepper to password hashing (PR #907, #910 by KentShikama)
* Add an admin option to shared secret registration (breaks backwards compat)
  (PR #909)
* Add purge local room history API (PR #911, #923, #924)
* Add requestToken endpoints (PR #915)
* Add an /account/deactivate endpoint (PR #921)
* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
* Add device_id support to /login (PR #929)
* Add device_id support to /v2/register flow. (PR #937, #942)
* Add GET /devices endpoint (PR #939, #944)
* Add GET /device/{deviceId} (PR #943)
* Add update and delete APIs for devices (PR #949)


Changes:

* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
* Linearize some federation endpoints based on (origin, room_id) (PR #879)
* Remove the legacy v0 content upload API. (PR #888)
* Use similar naming we use in email notifs for push (PR #894)
* Optionally include password hash in createUser endpoint (PR #905 by
  KentShikama)
* Use a query that postgresql optimises better for get_events_around (PR #906)
* Fall back to 'username' if 'user' is not given for appservice registration.
  (PR #927 by Half-Shot)
* Add metrics for psutil derived memory usage (PR #936)
* Record device_id in client_ips (PR #938)
* Send the correct host header when fetching keys (PR #941)
* Log the hostname the reCAPTCHA was completed on (PR #946)
* Make the device id on e2e key upload optional (PR #956)
* Add r0.2.0 to the "supported versions" list (PR #960)
* Don't include name of room for invites in push (PR #961)


Bug fixes:

* Fix substitution failure in mail template (PR #887)
* Put most recent 20 messages in email notif (PR #892)
* Ensure that the guest user is in the database when upgrading accounts
  (PR #914)
* Fix various edge cases in auth handling (PR #919)
* Fix 500 ISE when sending alias event without a state_key (PR #925)
* Fix bug where we stored rejections in the state_group, persist all
  rejections (PR #948)
* Fix lack of check of if the user is banned when handling 3pid invites
  (PR #952)
* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)



Changes in synapse v0.16.1-r1 (2016-07-08)
==========================================

THIS IS A CRITICAL SECURITY UPDATE.

This fixes a bug which allowed users' accounts to be accessed by unauthorised
users.

Changes in synapse v0.16.1 (2016-06-20)
=======================================

Bug fixes:

* Fix assorted bugs in ``/preview_url`` (PR #872)
* Fix TypeError when setting unicode passwords (PR #873)


Performance improvements:

* Turn ``use_frozen_events`` off by default (PR #877)
* Disable responding with canonical json for federation (PR #878)


Changes in synapse v0.16.1-rc1 (2016-06-15)
===========================================

Features: None

Changes:

* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
* Linearize fetching of gaps on incoming events (PR #871)


Bug fixes:

* Fix bug where rooms were marked as published by default (PR #857)
* Fix bug where joining room with an event with invalid sender (PR #868)
* Fix bug where backfilled events were sent down sync streams (PR #869)
* Fix bug where outgoing connections could wedge indefinitely, causing push
  notifications to be unreliable (PR #870)


Performance improvements:

* Improve ``/publicRooms`` performance (PR #859)


Changes in synapse v0.16.0 (2016-06-09)
=======================================

NB: As of v0.14 all AS config files must have an ID field.


Bug fixes:

* Don't make rooms published by default (PR #857)

Changes in synapse v0.16.0-rc2 (2016-06-08)
===========================================

Features:

* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)

Changes:

* Record metrics about GC (PR #771, #847, #852)
* Add metric counter for number of persisted events (PR #841)

Bug fixes:

* Fix 'From' header in email notifications (PR #843)
* Fix presence where timeouts were not being fired for the first 8h after
  restarts (PR #842)
* Fix bug where synapse sent malformed transactions to AS's when retrying
  transactions (Commits 310197b, 8437906)

Performance improvements:

* Remove event fetching from DB threads (PR #835)
* Change the way we cache events (PR #836)
* Add events to cache when we persist them (PR #840)


Changes in synapse v0.16.0-rc1 (2016-06-03)
===========================================

Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.

Features:

* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
  #821)
* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
* Add /report endpoint (PR #762)
* Add basic ignore user API (PR #763)
* Add an openidish mechanism for proving that you own a given user_id (PR #765)
* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
* Add secondary_directory_servers option to fetch room list from other servers
  (PR #808, #813)

Changes:

* Report per request metrics for all of the things using request_handler (PR
  #756)
* Correctly handle ``NULL`` password hashes from the database (PR #775)
* Allow receipts for events we haven't seen in the db (PR #784)
* Make synctl read a cache factor from config file (PR #785)
* Increment badge count per missed convo, not per msg (PR #793)
* Special case m.room.third_party_invite event auth to match invites (PR #814)


Bug fixes:

* Fix typo in event_auth servlet path (PR #757)
* Fix password reset (PR #758)


Performance improvements:

* Reduce database inserts when sending transactions (PR #767)
* Queue events by room for persistence (PR #768)
* Add cache to ``get_user_by_id`` (PR #772)
* Add and use ``get_domain_from_id`` (PR #773)
* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
* Remove unused indices (PR #782)
* Add caches to ``bulk_get_push_rules*`` (PR #804)
* Cache ``get_event_reference_hashes`` (PR #806)
* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
* Use state to calculate ``get_users_in_room`` (PR #811)
* Load push rules in storage layer so that they get cached (PR #825)
* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
* Poke notifier on next reactor tick (PR #829)
* Change CacheMetrics to be quicker (PR #830)


Changes in synapse v0.15.0-rc1 (2016-04-26)
===========================================

Features:

* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck
  (PR #671, #687)
* Add URL previewing support (PR #688)
* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
* Add GET endpoint for pushers (PR #716)

Changes:

* Never notify for member events (PR #667)
* Deduplicate identical ``/sync`` requests (PR #668)
* Require user to have left room to forget room (PR #673)
* Use DNS cache if within TTL (PR #677)
* Let users see their own leave events (PR #699)
* Deduplicate membership changes (PR #700)
* Increase performance of pusher code (PR #705)
* Respond with error status 504 if failed to talk to remote server (PR #731)
* Increase search performance on postgres (PR #745)

Bug fixes:

* Fix bug where disabling all notifications still resulted in push (PR #678)
* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
* Fix bug where synapse attempted to backfill from itself (PR #693)
* Fix bug where profile information was not correctly added when joining remote
  rooms (PR #703)
* Fix bug where register API required incorrect key name for AS registration
  (PR #727)


Changes in synapse v0.14.0 (2016-03-30)
=======================================

No changes from v0.14.0-rc2

Changes in synapse v0.14.0-rc2 (2016-03-23)
===========================================

Features:

* Add published room list API (PR #657)

Changes:

* Change various caches to consume less memory (PR #656, #658, #660, #662,
  #663, #665)
* Allow rooms to be published without requiring an alias (PR #664)
* Intern common strings in caches to reduce memory footprint (#666)

Bug fixes:

* Fix reject invites over federation (PR #646)
* Fix bug where registration was not idempotent (PR #649)
* Update aliases event after deleting aliases (PR #652)
* Fix unread notification count, which was sometimes wrong (PR #661)

Changes in synapse v0.14.0-rc1 (2016-03-14)
===========================================

Features:

* Add event_id to response to state event PUT (PR #581)
* Allow guest users access to messages in rooms they have joined (PR #587)
* Add config for what state is included in a room invite (PR #598)
* Send the inviter's member event in room invite state (PR #607)
* Add error codes for malformed/bad JSON in /login (PR #608)
* Add support for changing the actions for default rules (PR #609)
* Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
* Add ability for alias creators to delete aliases (PR #614)
* Add profile information to invites (PR #624)

Changes:

* Enforce user_id exclusivity for AS registrations (PR #572)
* Make adding push rules idempotent (PR #587)
* Improve presence performance (PR #582, #586)
* Change presence semantics for ``last_active_ago`` (PR #582, #586)
* Don't allow ``m.room.create`` to be changed (PR #596)
* Add 800x600 to default list of valid thumbnail sizes (PR #616)
* Always include kicks and bans in full /sync (PR #625)
* Send history visibility on boundary changes (PR #626)
* Register endpoint now returns a refresh_token (PR #637)

Bug fixes:

* Fix bug where we returned incorrect state in /sync (PR #573)
* Always return a JSON object from push rule API (PR #606)
* Fix bug where registering without a user id sometimes failed (PR #610)
* Report size of ExpiringCache in cache size metrics (PR #611)
* Fix rejection of invites to empty rooms (PR #615)
* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
* Pin ``pysaml2`` dependency (PR #634)
* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
  (PR #635)

Changes in synapse v0.13.3 (2016-02-11)
=======================================

* Fix bug where ``/sync`` would occasionally return events in the wrong room.

Changes in synapse v0.13.2 (2016-02-11)
=======================================

* Fix bug where ``/events`` would fail to skip some events if there had been
  more events than the limit specified since the last request (PR #570)

Changes in synapse v0.13.1 (2016-02-10)
=======================================

* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
  pull in the fix for SYWEB-361 so that the default client can display
  HTML messages again(!)

Changes in synapse v0.13.0 (2016-02-10)
=======================================

This version includes an upgrade of the schema, specifically adding an index to
the ``events`` table. This may cause synapse to pause for several minutes the
first time it is started after the upgrade.

Changes:

* Improve general performance (PR #540, #543, #544, #54, #549, #567)
* Change guest user ids to be incrementing integers (PR #550)
* Improve performance of public room list API (PR #552)
* Change profile API to omit keys rather than return null (PR #557)
* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
  (PR #595)

Bug fixes:

* Fix bug with upgrading guest accounts where it would fail if you opened the
  registration email on a different device (PR #547)
* Fix bug where unread count could be wrong (PR #568)



Changes in synapse v0.12.1-rc1 (2016-01-29)
===========================================

Features:

* Add unread notification counts in ``/sync`` (PR #456)
* Add support for inviting 3pids in ``/createRoom`` (PR #460)
* Add ability for guest accounts to upgrade (PR #462)
* Add ``/versions`` API (PR #468)
* Add ``event`` to ``/context`` API (PR #492)
* Add specific error code for invalid user names in ``/register`` (PR #499)
* Add support for push badge counts (PR #507)
* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)

Changes:

* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
* Change to require unbanning before other membership changes (PR #501)
* Change default push rules to notify for all messages (PR #486)
* Change default push rules to not notify on membership changes (PR #514)
* Change default push rules in one to one rooms to only notify for events that
  are messages (PR #529)
* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
* Change server manhole to use SSH rather than telnet (PR #473)
* Change server to require AS users to be registered before use (PR #487)
* Change server not to start when ASes are invalidly configured (PR #494)
* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
* Change maximum pagination limit to 1000 (PR #497)

Bug fixes:

* Fix bug where ``/sync`` didn't return when something under the leave key
  changed (PR #461)
* Fix bug where we returned smaller rather than larger than requested
  thumbnails when ``method=crop`` (PR #464)
* Fix thumbnails API to only return cropped thumbnails when asking for a
  cropped thumbnail (PR #475)
* Fix bug where we occasionally still logged access tokens (PR #477)
* Fix bug where ``/events`` would always return immediately for guest users
  (PR #480)
* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
* Fix enabling and disabling push rules (PR #498)
* Fix bug where ``/register`` returned 500 when given unicode username
  (PR #513)

Changes in synapse v0.12.0 (2016-01-04)
=======================================

* Expose ``/login`` under ``r0`` (PR #459)

Changes in synapse v0.12.0-rc3 (2015-12-23)
===========================================

* Allow guest accounts access to ``/sync`` (PR #455)
* Allow filters to include/exclude rooms at the room level
  rather than just from the components of the sync for each
  room. (PR #454)
* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
* Don't set an identicon as the avatar for a user when they register (PR #450)
* Add a ``display_name`` to third-party invites (PR #449)
* Send more information to the identity server for third-party invites so that
  it can send richer messages to the invitee (PR #446)
* Cache the responses to ``/initialSync`` for 5 minutes. If a client
  retries a request to ``/initialSync`` before a response was computed
  to the first request then the same response is used for both requests
  (PR #457)
* Fix a bug where synapse would always request the signing keys of
  remote servers even when the key was cached locally (PR #452)
* Fix 500 when paginating search results (PR #447)
* Fix a bug where synapse was leaking raw email address in third-party invites
  (PR #448)

Changes in synapse v0.12.0-rc2 (2015-12-14)
===========================================

* Add caches for whether rooms have been forgotten by a user (PR #434)
* Remove instructions to use ``--process-dependency-link`` since all of the
  dependencies of synapse are on PyPI (PR #436)
* Parallelise the processing of ``/sync`` requests (PR #437)
* Fix race updating presence in ``/events`` (PR #444)
* Fix bug back-populating search results (PR #441)
* Fix bug calculating state in ``/sync`` requests (PR #442)

Changes in synapse v0.12.0-rc1 (2015-12-10)
===========================================

* Host the client APIs released as r0 by
  https://matrix.org/docs/spec/r0.0.0/client_server.html
  on paths prefixed by ``/_matrix/client/r0``. (PR #430, PR #415, PR #400)
* Updates the client APIs to match r0 of the matrix specification.

  * All APIs return events in the new event format, old APIs also include
    the fields needed to parse the event using the old format for
    compatibility. (PR #402)
  * Search results are now given as a JSON array rather than
    a JSON object (PR #405)
  * Miscellaneous changes to search (PR #403, PR #406, PR #412)
  * Filter JSON objects may now be passed as query parameters to ``/sync``
    (PR #431)
  * Fix implementation of ``/admin/whois`` (PR #418)
  * Only include the rooms that user has left in ``/sync`` if the client
    requests them in the filter (PR #423)
  * Don't push for ``m.room.message`` by default (PR #411)
  * Add API for setting per account user data (PR #392)
  * Allow users to forget rooms (PR #385)

* Performance improvements and monitoring:

  * Add per-request counters for CPU time spent on the main python thread.
    (PR #421, PR #420)
  * Add per-request counters for time spent in the database (PR #429)
  * Make state updates in the C+S API idempotent (PR #416)
  * Only fire ``user_joined_room`` if the user has actually joined. (PR #410)
  * Reuse a single http client, rather than creating new ones (PR #413)

* Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)

Changes in synapse v0.11.1 (2015-11-20)
=======================================

* Add extra options to search API (PR #394)
* Fix bug where we did not correctly cap federation retry timers. This meant it
  could take several hours for servers to start talking to resurrected servers,
  even when they were receiving traffic from them (PR #393)
* Don't advertise login token flow unless CAS is enabled. This caused issues
  where some clients would always use the fallback API if they did not
  recognize all login flows (PR #391)
* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
  (PR #386)
* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
  object (PR #389)

Changes in synapse v0.11.0-r2 (2015-11-19)
==========================================

* Fix bug in database port script (PR #387)

Changes in synapse v0.11.0-r1 (2015-11-18)
==========================================

* Retry and fail federation requests more aggressively for requests that block
  client side requests (PR #384)

Changes in synapse v0.11.0 (2015-11-17)
=======================================

* Change CAS login API (PR #349)

Changes in synapse v0.11.0-rc2 (2015-11-13)
===========================================

* Various changes to /sync API response format (PR #373)
* Fix regression when setting display name in newly joined room over
  federation (PR #368)
* Fix problem where /search was slow when using SQLite (PR #366)

Changes in synapse v0.11.0-rc1 (2015-11-11)
===========================================

* Add Search API (PR #307, #324, #327, #336, #350, #359)
* Add 'archived' state to v2 /sync API (PR #316)
* Add ability to reject invites (PR #317)
* Add config option to disable password login (PR #322)
* Add the login fallback API (PR #330)
* Add room context API (PR #334)
* Add room tagging support (PR #335)
* Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
* Change retry schedule for application services (PR #320)
* Change retry schedule for remote servers (PR #340)
* Fix bug where we hosted static content in the incorrect place (PR #329)
* Fix bug where we didn't increment retry interval for remote servers (PR #343)

Changes in synapse v0.10.1-rc1 (2015-10-15)
===========================================

* Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
* Add support for using macaroons for ``access_token`` (PR #256, #229)
* Add support for ``m.room.canonical_alias`` (PR #287)
* Add support for viewing the history of rooms that they have left. (PR #276,
  #294)
* Add support for refresh tokens (PR #240)
* Add flag on creation which disables federation of the room (PR #279)
* Add some room state to invites. (PR #275)
* Atomically persist events when joining a room over federation (PR #283)
* Change default history visibility for private rooms (PR #271)
* Allow users to redact their own sent events (PR #262)
* Use tox for tests (PR #247)
* Split up syutil into separate libraries (PR #243)

Changes in synapse v0.10.0-r2 (2015-09-16)
==========================================

* Fix bug where we always fetched remote server signing keys instead of using
  ones in our cache.
* Fix adding threepids to an existing account.
* Fix bug with inviting over federation where remote server was already in
  the room. (PR #281, SYN-392)

Changes in synapse v0.10.0-r1 (2015-09-08)
==========================================

* Fix bug with python packaging

Changes in synapse v0.10.0 (2015-09-03)
=======================================

No change from release candidate.

Changes in synapse v0.10.0-rc6 (2015-09-02)
===========================================

* Remove some of the old database upgrade scripts.
* Fix database port script to work with newly created sqlite databases.

Changes in synapse v0.10.0-rc5 (2015-08-27)
===========================================

* Fix bug that broke downloading files with ascii filenames across federation.

Changes in synapse v0.10.0-rc4 (2015-08-27)
===========================================

* Allow UTF-8 filenames for upload. (PR #259)

Changes in synapse v0.10.0-rc3 (2015-08-25)
===========================================

* Add ``--keys-directory`` config option to specify where files such as
  certs and signing keys should be stored in, when using ``--generate-config``
  or ``--generate-keys``. (PR #250)
* Allow ``--config-path`` to specify a directory, causing synapse to use all
  \*.yaml files in the directory as config files. (PR #249)
* Add ``web_client_location`` config option to specify static files to be
  hosted by synapse under ``/_matrix/client``. (PR #245)
* Add helper utility to synapse to read and parse the config files and extract
  the value of a given key. For example::

      $ python -m synapse.config read server_name -c homeserver.yaml
      localhost

  (PR #246)


Changes in synapse v0.10.0-rc2 (2015-08-24)
===========================================

* Fix bug where we incorrectly populated the ``event_forward_extremities``
  table, resulting in problems joining large remote rooms (e.g.
  ``#matrix:matrix.org``)
* Reduce the number of times we wake up pushers by not listening for presence
  or typing events, reducing the CPU cost of each pusher.


Changes in synapse v0.10.0-rc1 (2015-08-21)
===========================================

Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.

General:

* Upgrade to Twisted 15 (PR #173)
* Add support for serving and fetching encryption keys over federation.
  (PR #208)
* Add support for logging in with email address (PR #234)
* Add support for new ``m.room.canonical_alias`` event. (PR #233)
* Change synapse to treat user IDs case insensitively during registration and
  login. (If two users already exist with case insensitive matching user ids,
  synapse will continue to require them to specify their user ids exactly.)
* Error if a user tries to register with an email already in use. (PR #211)
* Add extra and improve existing caches (PR #212, #219, #226, #228)
* Batch various storage request (PR #226, #228)
* Fix bug where we didn't correctly log the entity that triggered the request
  if the request came in via an application service (PR #230)
* Fix bug where we needlessly regenerated the full list of rooms an AS is
  interested in. (PR #232)
* Add support for AS's to use v2_alpha registration API (PR #210)


Configuration:

* Add ``--generate-keys`` that will generate any missing cert and key files in
  the configuration files. This is equivalent to running ``--generate-config``
  on an existing configuration file. (PR #220)
* ``--generate-config`` now no longer requires a ``--server-name`` parameter
  when used on existing configuration files. (PR #220)
* Add ``--print-pidfile`` flag that controls the printing of the pid to stdout
  of the daemonised process. (PR #213)

Media Repository:

* Fix bug where we picked a lower resolution image than requested. (PR #205)
* Add support for specifying if the media repository should dynamically
  thumbnail images or not. (PR #206)

Metrics:

* Add statistics from the reactor to the metrics API. (PR #224, #225)

Demo Homeservers:

* Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
* Fix enabling registration on demo homeservers (PR #223)


Changes in synapse v0.9.4-rc1 (2015-07-21)
==========================================

General:

* Add basic implementation of receipts. (SPEC-99)
* Add support for configuration presets in room creation API. (PR #203)
* Add auth event that limits the visibility of history for new users.
  (SPEC-134)
* Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
* Add client side key management APIs for end to end encryption. (PR #198)
* Change power level semantics so that you cannot kick, ban or change power
  levels of users that have equal or greater power level than you. (SYN-192)
* Improve performance by bulk inserting events where possible. (PR #193)
* Improve performance by bulk verifying signatures where possible. (PR #194)


Configuration:

* Add support for including TLS certificate chains.

Media Repository:

* Add Content-Disposition headers to content repository responses. (SYN-150)


Changes in synapse v0.9.3 (2015-07-01)
======================================

No changes from v0.9.3 Release Candidate 1.

Changes in synapse v0.9.3-rc1 (2015-06-23)
==========================================

General:

* Fix a memory leak in the notifier. (SYN-412)
* Improve performance of room initial sync. (SYN-418)
* General improvements to logging.
* Remove ``access_token`` query params from ``INFO`` level logging.

Configuration:

* Add support for specifying and configuring multiple listeners. (SYN-389)

Application services:

* Fix bug where synapse failed to send user queries to application services.

Changes in synapse v0.9.2-r2 (2015-06-15)
=========================================

Fix packaging so that schema delta python files get included in the package.

Changes in synapse v0.9.2 (2015-06-12)
======================================

General:

* Use ultrajson for json (de)serialisation when a canonical encoding is not
  required. Ultrajson is significantly faster than simplejson in certain
  circumstances.
* Use connection pools for outgoing HTTP connections.
* Process thumbnails on separate threads.

Configuration:

* Add option, ``gzip_responses``, to disable HTTP response compression.

Federation:

* Improve resilience of backfill by ensuring we fetch any missing auth events.
* Improve performance of backfill and joining remote rooms by removing
  unnecessary computations. This included handling events we'd previously
  handled as well as attempting to compute the current state for outliers.


Changes in synapse v0.9.1 (2015-05-26)
======================================

General:

* Add support for backfilling when a client paginates. This allows servers to
  request history for a room from remote servers when a client tries to
  paginate history the server does not have - SYN-36
* Fix bug where you couldn't disable non-default pushrules - SYN-378
* Fix ``register_new_user`` script - SYN-359
* Improve performance of fetching events from the database, this improves both
  initialSync and sending of events.
* Improve performance of event streams, allowing synapse to handle more
  simultaneous connected clients.

Federation:

* Fix bug with existing backfill implementation where it returned the wrong
  selection of events in some circumstances.
* Improve performance of joining remote rooms.

Configuration:

* Add support for changing the bind host of the metrics listener via the
  ``metrics_bind_host`` option.


Changes in synapse v0.9.0-r5 (2015-05-21)
=========================================

* Add more database caches to reduce amount of work done for each pusher. This
  radically reduces CPU usage when multiple pushers are set up in the same room.

Changes in synapse v0.9.0 (2015-05-07)
======================================

General:

* Add support for using a PostgreSQL database instead of SQLite. See
  `docs/postgres.rst`_ for details.
* Add password change and reset APIs. See `Registration`_ in the spec.
* Fix memory leak due to not releasing stale notifiers - SYN-339.
* Fix race in caches that occasionally caused some presence updates to be
  dropped - SYN-369.
* Check server name has not changed on restart.
* Add a sample systemd unit file and a logger configuration in
  contrib/systemd. Contributed by Ivan Shapovalov.

Federation:

* Add key distribution mechanisms for fetching public keys of unavailable
  remote home servers. See `Retrieving Server Keys`_ in the spec.

Configuration:

* Add support for multiple config files.
* Add support for dictionaries in config files.
* Remove support for specifying config options on the command line, except
  for:

  * ``--daemonize`` - Daemonize the home server.
  * ``--manhole`` - Turn on the twisted telnet manhole service on the given
    port.
  * ``--database-path`` - The path to a sqlite database to use.
  * ``--verbose`` - The verbosity level.
  * ``--log-file`` - File to log to.
  * ``--log-config`` - Python logging config file.
  * ``--enable-registration`` - Enable registration for new users.

Application services:

* Reliably retry sending of events from Synapse to application services, as per
  `Application Services`_ spec.
* Application services can no longer register via the ``/register`` API,
  instead their configuration should be saved to a file and listed in the
  synapse ``app_service_config_files`` config option. The AS configuration file
  has the same format as the old ``/register`` request.
  See `docs/application_services.rst`_ for more information (a config sketch
  follows below).

.. _`docs/postgres.rst`: docs/postgres.rst
.. _`docs/application_services.rst`: docs/application_services.rst
.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
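
As an illustration of the ``app_service_config_files`` option mentioned above,
a minimal, hypothetical ``homeserver.yaml`` fragment (the path is a
placeholder; each listed file holds one AS configuration in the same format as
the old ``/register`` request)::

    app_service_config_files:
      - /etc/synapse/my_appservice.yaml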

Changes in synapse v0.8.1 (2015-03-18)
======================================

* Disable registration by default. New users can be added using the command
  ``register_new_matrix_user`` or by enabling registration in the config.
* Add metrics to synapse. To enable metrics use config options
  ``enable_metrics`` and ``metrics_port`` (a sketch follows below).
* Fix bug where banning only kicked the user.
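
A sketch of those metrics options as they would appear in the config file (the
port number is illustrative)::

    enable_metrics: true
    metrics_port: 9092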

Changes in synapse v0.8.0 (2015-03-06)
======================================

@@ -1106,7 +271,7 @@ See UPGRADE for information about changes to the client server API, including
 breaking backwards compatibility with VoIP calls and registration API.

 Homeserver:
- * When a user changes their displayname or avatar the server will now update
+ * When a user changes their displayname or avatar the server will now update
    all their join states to reflect this.
 * The server now adds "age" key to events to indicate how old they are. This
   is clock independent, so at no point does any server or webclient have to

@@ -1164,7 +329,7 @@ Changes in synapse 0.2.2 (2014-09-06)
 =====================================

 Homeserver:
- * When the server returns state events it now also includes the previous
+ * When the server returns state events it now also includes the previous
   content.
 * Add support for inviting people when creating a new room.
 * Make the homeserver inform the room via `m.room.aliases` when a new alias

@@ -1176,7 +341,7 @@ Webclient:
 * Handle `m.room.aliases` events.
 * Asynchronously send messages and show a local echo.
 * Inform the UI when a message failed to send.
- * Only autoscroll on receiving a new message if the user was already at the
+ * Only autoscroll on receiving a new message if the user was already at the
   bottom of the screen.
 * Add support for ban/kick reasons.
CONTRIBUTING.rst: 118 changes
@@ -1,118 +0,0 @@
Contributing code to Matrix
===========================

Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).

How to contribute
~~~~~~~~~~~~~~~~~

The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)

**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**

We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
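
A minimal command-line sketch of that workflow (the fork URL and branch name
are illustrative)::

    $ git clone https://github.com/yourusername/synapse.git
    $ cd synapse
    $ git checkout develop
    $ git checkout -b my-feature
    ... hack hack hack, commit ...
    $ git push origin my-feature

...then open a pull request on github asking to merge ``my-feature`` into the
matrix.org ``develop`` branch.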

We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested by Jenkins: if your
change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org,
so please lurk there and keep an eye open.

Code style
~~~~~~~~~~

All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.

Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.

Attribution
~~~~~~~~~~~

Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org
and we'll try to fix it :)

Sign off
~~~~~~~~

In order to have a concrete record that your contribution is intentional and
you agree to license it under the same terms as the project's license, we've
adopted the same lightweight approach that the Linux Kernel
(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::

    Developer Certificate of Origin
    Version 1.1

    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
    660 York Street, Suite 102,
    San Francisco, CA 94110 USA

    Everyone is permitted to copy and distribute verbatim copies of this
    license document, but changing it is not allowed.

    Developer's Certificate of Origin 1.1

    By making a contribution to this project, I certify that:

    (a) The contribution was created in whole or in part by me and I
        have the right to submit it under the open source license
        indicated in the file; or

    (b) The contribution is based upon previous work that, to the best
        of my knowledge, is covered under an appropriate open source
        license and I have the right under that license to submit that
        work with modifications, whether created in whole or in part
        by me, under the same open source license (unless I am
        permitted to submit under a different license), as indicated
        in the file; or

    (c) The contribution was provided directly to me by some other
        person who certified (a), (b) or (c) and I have not modified
        it.

    (d) I understand and agree that this project and the contribution
        are public and that a record of the contribution (including all
        personal information I submit with it, including my sign-off) is
        maintained indefinitely and may be redistributed consistent with
        this project or the open source license(s) involved.

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::

    Signed-off-by: Your Name <your@email.example.org>

...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
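
For example, with placeholder details::

    $ git config user.name "Your Name"
    $ git config user.email your@email.example.org
    $ git commit -s -m "Describe your change here"

The ``-s`` flag makes git append the ``Signed-off-by:`` line for you, using
those configured values.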

Conclusion
~~~~~~~~~~

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
MANIFEST.in: 20 changes
@@ -3,26 +3,12 @@ include LICENSE
include VERSION
include *.rst
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py

recursive-include demo *.dh
recursive-include demo *.py
recursive-include demo *.sh
recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py

recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude jenkins.sh
exclude jenkins*.sh

prune demo/etc
README.rst: 461 changes
@@ -1,5 +1,3 @@
-.. contents::
-
 Introduction
 ============
 
@@ -7,7 +5,7 @@ Matrix is an ambitious new ecosystem for open federated Instant Messaging and
 VoIP. The basics you need to know to get up and running are:
 
 - Everything in Matrix happens in a room. Rooms are distributed and do not
-  exist on any single server. Rooms can be located using convenience aliases
+  exist on any single server. Rooms can be located using convenience aliases
   like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
 
 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
@@ -20,10 +18,10 @@ The overall architecture is::
 
       https://somewhere.org/_matrix      https://elsewhere.net/_matrix
 
 ``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
-bridge at irc://irc.freenode.net/matrix.
+accessed by the web client at http://matrix.org/alpha or via an IRC bridge at
+irc://irc.freenode.net/matrix.
 
-Synapse is currently in rapid development, but as of version 0.5 we believe it
+Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!
 
 About Matrix
@@ -58,44 +56,36 @@ the spec in the context of a codebase and let you run your own homeserver and
 generally help bootstrap the ecosystem.
 
 In Matrix, every user runs one or more Matrix clients, which connect through to
-a Matrix homeserver. The homeserver stores all their personal chat history and
-user account information - much as a mail client connects through to an
-IMAP/SMTP server. Just like email, you can either run your own Matrix
-homeserver and control and own your own communications and history or use one
-hosted by someone else (e.g. matrix.org) - there is no single point of control
-or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
-etc.
+a Matrix homeserver which stores all their personal chat history and user
+account information - much as a mail client connects through to an IMAP/SMTP
+server. Just like email, you can either run your own Matrix homeserver and
+control and own your own communications and history or use one hosted by
+someone else (e.g. matrix.org) - there is no single point of control or
+mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.
 
 Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
 web client demo implemented in AngularJS) and cmdclient (a basic Python
 command line utility which lets you easily see what the JSON APIs are up to).
 
-Meanwhile, iOS and Android SDKs and clients are available from:
+Meanwhile, iOS and Android SDKs and clients are currently in development and available from:
 
 - https://github.com/matrix-org/matrix-ios-sdk
-- https://github.com/matrix-org/matrix-ios-kit
-- https://github.com/matrix-org/matrix-ios-console
 - https://github.com/matrix-org/matrix-android-sdk
 
-We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
-Matrix spec at https://matrix.org/docs/spec and API docs at
-https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
-report any bugs via https://matrix.org/jira.
+We'd like to invite you to join #matrix:matrix.org (via http://matrix.org/alpha), run a homeserver, take a look at the Matrix spec at
+http://matrix.org/docs/spec, experiment with the APIs and the demo
+clients, and report any bugs via http://matrix.org/jira.
 
 Thanks for using Matrix!
 
-[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+[1] End-to-end encryption is currently in development
 
-Synapse Installation
-====================
-
-Synapse is the reference python/twisted Matrix homeserver implementation.
+Homeserver Installation
+=======================
 
 System requirements:
-- POSIX-compliant system (tested on Linux & OS X)
+- POSIX-compliant system (tested on Linux & OSX)
 - Python 2.7
 - At least 512 MB RAM.
 
 Synapse is written in python but some of the libraries it uses are written in
 C. So before we can install synapse itself we need a working C compiler and the
@@ -103,210 +93,117 @@ header files for python C extensions.
 
 Installing prerequisites on Ubuntu or Debian::
 
-    sudo apt-get install build-essential python2.7-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
+    $ sudo apt-get install build-essential python2.7-dev libffi-dev \
+                           python-pip python-setuptools sqlite3 \
+                           libssl-dev python-virtualenv libjpeg-dev
 
 Installing prerequisites on ArchLinux::
 
-    sudo pacman -S base-devel python2 python-pip \
-                   python-setuptools python-virtualenv sqlite3
-
-Installing prerequisites on CentOS 7::
-
-    sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel tcl-devel tk-devel \
-                     python-virtualenv libffi-devel openssl-devel
-    sudo yum groupinstall "Development Tools"
+    $ sudo pacman -S base-devel python2 python-pip \
+                     python-setuptools python-virtualenv sqlite3
 
 Installing prerequisites on Mac OS X::
 
-    xcode-select --install
-    sudo easy_install pip
-    sudo pip install virtualenv
-
-Installing prerequisites on Raspbian::
-
-    sudo apt-get install build-essential python2.7-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev
-    sudo pip install --upgrade pip
-    sudo pip install --upgrade ndg-httpsclient
-    sudo pip install --upgrade virtualenv
+    $ xcode-select --install
+    $ sudo pip install virtualenv
 
 To install the synapse homeserver run::
 
-    virtualenv -p python2.7 ~/.synapse
-    source ~/.synapse/bin/activate
-    pip install --upgrade setuptools
-    pip install https://github.com/matrix-org/synapse/tarball/master
+    $ virtualenv ~/.synapse
+    $ source ~/.synapse/bin/activate
+    $ pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
 
 This installs synapse, along with the libraries it uses, into a virtual
-environment under ``~/.synapse``. Feel free to pick a different directory
-if you prefer.
-
-In case of problems, please see the _Troubleshooting section below.
-
-Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
-above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
-
-Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
-for details.
+environment under ``~/.synapse``.
 
 To set up your homeserver, run (in your virtualenv, as before)::
 
-    cd ~/.synapse
-    python -m synapse.app.homeserver \
+    $ cd ~/.synapse
+    $ python -m synapse.app.homeserver \
         --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
-        --generate-config \
-        --report-stats=[yes|no]
-
-...substituting your host and domain name as appropriate.
-
-This will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
-old key cached. If you update the signing key, you should change the name of the
-key in the <server name>.signing.key file (the second word) to something different.
-
-By default, registration of new users is disabled. You can either enable
-registration in the config by specifying ``enable_registration: true``
-(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
-you can use the command line to register new users::
-
-    $ source ~/.synapse/bin/activate
-    $ synctl start # if not already running
-    $ register_new_matrix_user -c homeserver.yaml https://localhost:8448
-    New user localpart: erikj
-    Password:
-    Confirm password:
-    Success!
+        --generate-config
+
+Substituting your host and domain name as appropriate.
 
-For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See docs/turn-howto.rst for details.
 
-Running Synapse
-===============
+Troubleshooting Installation
+----------------------------
 
-To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. ``~/.synapse``), and::
+Synapse requires pip 1.7 or later, so if your OS provides too old a version and
+you get errors about ``error: no such option: --process-dependency-links`` you
+may need to manually upgrade it::
 
-    cd ~/.synapse
-    source ./bin/activate
-    synctl start
+    $ sudo pip install --upgrade pip
 
-Using PostgreSQL
-================
+If pip crashes mid-installation for any reason (e.g. lost terminal), pip may
+refuse to run until you remove the temporary installation directory it
+created. To reset the installation::
 
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
+    $ rm -rf /tmp/pip_install_matrix
 
-The advantages of Postgres include:
+pip seems to leak *lots* of memory during installation. For instance, a Linux
+host with 512MB of RAM may run out of memory whilst installing Twisted. If this
+happens, you will have to individually install the dependencies which are
+failing, e.g.::
 
-* significant performance improvements due to the superior threading and
-  caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-  pointing at the same DB master, as well as enabling DB replication in
-  synapse itself.
+    $ pip install twisted
 
-The only disadvantage is that the code is relatively new as of April 2015 and
-may have a few regressions relative to SQLite.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
-
-Platform Specific Instructions
-==============================
-
-Debian
-------
-
-Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
-Note that these packages do not include a client - choose one from
-https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
|
||||
|
||||
Fedora
|
||||
------
|
||||
|
||||
Oleg Girko provides Fedora RPMs at
|
||||
https://obs.infoserver.lv/project/monitor/matrix-synapse
|
||||
On OSX, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
|
||||
will need to export CFLAGS=-Qunused-arguments.
|
||||
|
||||
ArchLinux
|
||||
---------
|
||||
|
||||
The quickest way to get up and running with ArchLinux is probably with Ivan
|
||||
Shapovalov's AUR package from
|
||||
https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
|
||||
the necessary dependencies.
|
||||
|
||||
Alternatively, to install using pip a few changes may be needed as ArchLinux
|
||||
defaults to python 3, but synapse currently assumes python 2.7 by default:
|
||||
Installation on ArchLinux may encounter a few hiccups as Arch defaults to
|
||||
python 3, but synapse currently assumes python 2.7 by default.
|
||||
|
||||
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::
|
||||
|
||||
sudo pip2.7 install --upgrade pip
|
||||
|
||||
$ sudo pip2.7 install --upgrade pip
|
||||
|
||||
You also may need to explicitly specify python 2.7 again during the install
|
||||
request::
|
||||
|
||||
pip2.7 install https://github.com/matrix-org/synapse/tarball/master
|
||||
|
||||
$ pip2.7 install --process-dependency-links \
|
||||
https://github.com/matrix-org/synapse/tarball/master
|
||||
|
||||
If you encounter an error with lib bcrypt causing an Wrong ELF Class:
|
||||
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
|
||||
compile it under the right architecture. (This should not be needed if
|
||||
installing under virtualenv)::
|
||||
|
||||
sudo pip2.7 uninstall py-bcrypt
|
||||
sudo pip2.7 install py-bcrypt
|
||||
$ sudo pip2.7 uninstall py-bcrypt
|
||||
$ sudo pip2.7 install py-bcrypt
|
||||
|
||||
During setup of homeserver you need to call python2.7 directly again::
|
||||
|
||||
During setup of Synapse you need to call python2.7 directly again::
|
||||
|
||||
cd ~/.synapse
|
||||
python2.7 -m synapse.app.homeserver \
|
||||
$ cd ~/.synapse
|
||||
$ python2.7 -m synapse.app.homeserver \
|
||||
--server-name machine.my.domain.name \
|
||||
--config-path homeserver.yaml \
|
||||
--generate-config
|
||||
|
||||
|
||||
...substituting your host and domain name as appropriate.
|
||||
|
||||
FreeBSD
|
||||
-------
|
||||
|
||||
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
|
||||
|
||||
- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
|
||||
- Packages: ``pkg install py27-matrix-synapse``
|
||||
|
||||
NixOS
|
||||
-----
|
||||
|
||||
Robin Lambertz has packaged Synapse for NixOS at:
|
||||
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
|
||||
|
||||
Windows Install
|
||||
---------------
|
||||
Synapse can be installed on Cygwin. It requires the following Cygwin packages:
|
||||
|
||||
- gcc
|
||||
- git
|
||||
- libffi-devel
|
||||
- openssl (and openssl-devel, python-openssl)
|
||||
- python
|
||||
- python-setuptools
|
||||
- gcc
|
||||
- git
|
||||
- libffi-devel
|
||||
- openssl (and openssl-devel, python-openssl)
|
||||
- python
|
||||
- python-setuptools
|
||||
|
||||
The content repository requires additional packages and will be unable to process
|
||||
uploads without them:
|
||||
|
||||
- libjpeg8
|
||||
- libjpeg8-devel
|
||||
- zlib
|
||||
|
||||
- libjpeg8
|
||||
- libjpeg8-devel
|
||||
- zlib
|
||||
If you choose to install Synapse without these packages, you will need to reinstall
|
||||
``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install
|
||||
pillow --user``
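That is, a minimal sketch of the reinstall from inside your Cygwin Python
environment::

    $ pip uninstall pillow
    $ pip install pillow --user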
@@ -322,55 +219,21 @@ Troubleshooting:

you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``

Troubleshooting
===============
Running Your Homeserver
=======================

Troubleshooting Installation
----------------------------
To actually run your new homeserver, pick a working directory for Synapse to run
(e.g. ``~/.synapse``), and::

Synapse requires pip 1.7 or later, so if your OS provides too old a version you
may need to manually upgrade it::

    sudo pip install --upgrade pip

Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
You can fix this by manually upgrading pip and virtualenv::

    sudo pip install --upgrade virtualenv

You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.

Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
You can fix this by manually installing ndg-httpsclient::

    pip install --upgrade ndg-httpsclient

Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
You can fix this by upgrading setuptools::

    pip install --upgrade setuptools

If pip crashes mid-installation for whatever reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::

    rm -rf /tmp/pip_install_matrix

pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::

    pip install twisted

On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
will need to export CFLAGS=-Qunused-arguments.

    $ cd ~/.synapse
    $ source ./bin/activate
    $ synctl start

Troubleshooting Running
-----------------------

If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and
@@ -379,46 +242,44 @@ correctly, causing all tests to fail with errors about missing "sodium.h". To
fix try re-installing from PyPI or directly from
(https://github.com/pyca/pynacl)::

    # Install from PyPI
    pip install --user --upgrade --force pynacl

    # Install from github
    pip install --user https://github.com/pyca/pynacl/tarball/master

    $ # Install from PyPI
    $ pip install --user --upgrade --force pynacl
    $ # Install from github
    $ pip install --user https://github.com/pyca/pynacl/tarball/master

ArchLinux
~~~~~~~~~
---------

If running `$ synctl start` fails with 'returned non-zero exit status 1',
you will need to explicitly call Python2.7 - either running as::

    python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml

If running `$ synctl start` fails with 'returned non-zero exit status 1', you will need to explicitly call Python2.7 - either running as::

    $ python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml --pid-file homeserver.pid

...or by editing synctl with the correct python executable.

Synapse Development
===================
Homeserver Development
======================

To check out a synapse for development, clone the git repo into a working
To check out a homeserver for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse
    $ git clone https://github.com/matrix-org/synapse.git
    $ cd synapse

Synapse has a number of external dependencies, that are easiest
The homeserver has a number of external dependencies, that are easiest
to install using pip and a virtualenv::

    virtualenv env
    source env/bin/activate
    python synapse/python_dependencies.py | xargs -n1 pip install
    pip install setuptools_trial mock
    $ virtualenv env
    $ source env/bin/activate
    $ python synapse/python_dependencies.py | xargs -n1 pip install
    $ pip install setuptools_trial mock

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

Once this is done, you may wish to run Synapse's unit tests, to
Once this is done, you may wish to run the homeserver's unit tests, to
check that everything is installed as it should be::

    python setup.py test
    $ python setup.py test

This should end with a 'PASSED' result::

@@ -427,14 +288,17 @@ This should end with a 'PASSED' result::

    PASSED (successes=143)


Upgrading an existing Synapse
=============================
Upgrading an existing homeserver
================================

The instructions for upgrading synapse are in `UPGRADE.rst`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.
IMPORTANT: Before upgrading an existing homeserver to a new version, please
refer to UPGRADE.rst for any additional instructions.

Otherwise, simply re-install the new codebase over the current one - e.g.
by ``pip install --process-dependency-links
https://github.com/matrix-org/synapse/tarball/master``
if using pip, or by ``git pull`` if running off a git working copy.

.. _UPGRADE.rst: UPGRADE.rst

Setting up Federation
=====================
@@ -445,7 +309,7 @@ You have two choices here, which will influence the form of your Matrix user
IDs:

1) Use the machine's own hostname as available on public DNS in the form of
   its A records. This is easier to set up initially, perhaps for
   its A or AAAA records. This is easier to set up initially, perhaps for
   testing, but lacks the flexibility of SRV.

2) Set up a SRV record for your domain name. This requires you create a SRV
@@ -456,11 +320,11 @@ IDs:
For the first form, simply pass the required hostname (of the machine) as the
--server-name parameter::

    python -m synapse.app.homeserver \
    $ python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml
    $ python -m synapse.app.homeserver --config-path homeserver.yaml

Alternatively, you can run ``synctl start`` to guide you through the process.

@@ -470,37 +334,38 @@ and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::

    $ dig -t srv _matrix._tcp.machine.my.domain.name
    $ dig -t srv _matrix._tcp.machine.my.domaine.name
    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.


At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::

    python -m synapse.app.homeserver \
    $ python -m synapse.app.homeserver \
        --server-name YOURDOMAIN \
        --bind-port 8448 \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml
    $ python -m synapse.app.homeserver --config-path homeserver.yaml


If you've already generated the config file, you need to edit the "server_name"
in your ``homeserver.yaml`` file. If you've already started Synapse and a
database has been created, you will have to recreate the database.
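For instance, a sketch of recreating the database, assuming the default SQLite
setup with everything under ``~/.synapse`` (this destroys all existing rooms,
users and messages, so only do it on a fresh or disposable install)::

    $ cd ~/.synapse
    $ synctl stop
    $ rm homeserver.db   # WARNING: deletes all server state
    $ synctl start       # a fresh database is created on startup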

You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.

Running a Demo Federation of Synapses
-------------------------------------
For the initial alpha release, the homeserver is not speaking TLS for
either client-server or server-server traffic for ease of debugging. We have
also not spent any time yet getting the homeserver to run behind loadbalancers.

Running a Demo Federation of Homeservers
----------------------------------------

If you want to get up and running quickly with a trio of homeservers in a
private federation (``localhost:8080``, ``localhost:8081`` and
``localhost:8082``), which you can then access through the webclient running at
http://localhost:8080, simply run::

    demo/start.sh

    $ demo/start.sh

This is mainly useful just for development purposes.

Running The Demo Web Client
@@ -527,10 +392,7 @@ account. Your name will take the form of::

Specify your desired localpart in the topmost box of the "Register for an
account" form, and click the "Register" button. Hostnames can contain ports if
required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
internal synapse sandbox running on localhost).

If registration fails, you may need to enable it in the homeserver (see
`Synapse Installation`_ above)
internal synapse sandbox running on localhost)


Logging In To An Existing Account
@@ -539,6 +401,7 @@ Logging In To An Existing Account

Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.


Identity Servers
================

@@ -555,51 +418,14 @@ track 3PID logins and publish end-user public keys.

It's currently early days for identity servers as Matrix is not yet using 3PIDs
as the primary means of identity and E2E encryption is not complete. As such,
we are running a single identity server (https://matrix.org) at the current
we are running a single identity server (http://matrix.org:8090) at the current
time.


URL Previews
============

Synapse 0.15.0 introduces an experimental new API for previewing URLs at
/_matrix/media/r0/preview_url. This is disabled by default. To turn it on
you must enable the ``url_preview_enabled: True`` config parameter and explicitly
specify the IP ranges that Synapse is not allowed to spider for previewing in
the ``url_preview_ip_range_blacklist`` configuration parameter. This is critical
from a security perspective to stop arbitrary Matrix users spidering 'internal'
URLs on your network. At the very least we recommend that your loopback and
RFC1918 IP addresses are blacklisted.

This also requires the optional lxml and netaddr python dependencies to be
installed.
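As a minimal sketch of such a configuration - assuming the loopback and RFC1918
ranges below are the ones you want to block; adjust them for your own network::

    $ pip install lxml netaddr
    $ cat >> homeserver.yaml <<'EOF'
    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'
    EOF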

Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Vector.

A manual password reset can be done via direct database access as follows.

First calculate the hash of the new password::

    $ source ~/.synapse/bin/activate
    $ ./scripts/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
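If you are still on the default SQLite database, one way to apply that update
is via the ``sqlite3`` command line tool (a sketch, assuming the database lives
at ``~/.synapse/homeserver.db``)::

    $ sqlite3 ~/.synapse/homeserver.db
    sqlite> UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
       ...>     WHERE name='@test:test.com';
    sqlite> .quit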

Where's the spec?!
==================

The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
A recent HTML snapshot of this lives at http://matrix.org/docs/spec


@@ -609,27 +435,10 @@ Building Internal API Documentation

Before building internal API documentation install sphinx and
sphinxcontrib-napoleon::

    pip install sphinx
    pip install sphinxcontrib-napoleon

    $ pip install sphinx
    $ pip install sphinxcontrib-napoleon

Building internal API documentation::

    python setup.py build_sphinx



Help!! Synapse eats all my RAM!
===============================

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
at around 3-4GB of resident memory - this is what we currently run
matrix.org on. The default setting is currently 0.1, which is probably
around a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it up if
you need performance for lots of users and have a box with a lot of RAM.
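For example, a sketch of restarting synapse with a smaller cache (the memory
figures above are rough guides rather than guarantees)::

    $ export SYNAPSE_CACHE_FACTOR=0.02
    $ synctl restart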

    $ python setup.py build_sphinx


88
UPGRADE.rst
88
UPGRADE.rst
@@ -1,91 +1,3 @@

Upgrading Synapse
=================

Before upgrading check if any special steps are required to upgrade from
what you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.

If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:

.. code:: bash

    source ~/.synapse/bin/activate

If synapse was installed using pip then upgrade to the latest version by
running:

.. code:: bash

    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

If synapse was installed using git then upgrade to the latest version by
running:

.. code:: bash

    # Pull the latest version of the master branch.
    git pull
    # Update the versions of synapse's python dependencies.
    python synapse/python_dependencies.py | xargs -n1 pip install


Upgrading to v0.15.0
====================

If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.


Upgrading to v0.11.0
====================

This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.

We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.


Upgrading to v0.9.0
===================

Application services have had a breaking API change in this version.

They can no longer register themselves with a home server using the AS HTTP API. This
decision was made because a compromised application service with free rein to register
any regex in effect grants full read/write access to the home server if a regex of ``.*``
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
big of a security risk to ignore, and so the ability to register with the HS remotely has
been removed.

It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::

    app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]

Where ``registration-01.yaml`` looks like::

    url: <String>  # e.g. "https://my.application.service.com"
    as_token: <String>
    hs_token: <String>
    sender_localpart: <String>  # This is a new field which denotes the user_id localpart when using the AS token
    namespaces:
      users:
        - exclusive: <Boolean>
          regex: <String>  # e.g. "@prefix_.*"
      aliases:
        - exclusive: <Boolean>
          regex: <String>
      rooms:
        - exclusive: <Boolean>
          regex: <String>
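The ``as_token`` and ``hs_token`` values can be any hard-to-guess strings; one
sketch of generating a pair, reusing the ``pwgen`` tool mentioned in
docs/turn-howto.rst::

    $ pwgen -s 64 2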

Upgrading to v0.8.0
===================


@@ -1,6 +1,6 @@

#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@

# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@

# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,151 +0,0 @@

# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, basestring):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except KeyError:
                # referenced event is outside the window we loaded; add a stub node
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
                    "from a file with line delimited events. \n"
                    "Requires pydot."
    )
    parser.add_argument(
        "-p", "--prefix", dest="prefix",
        help="String to prefix output files with",
        default="graph_output"
    )
    parser.add_argument(
        "-l", "--limit",
        help="Only retrieve the last N events.",
    )
    parser.add_argument('event_file')
    parser.add_argument('room')

    args = parser.parse_args()

    make_graph(args.event_file, args.room, args.prefix, args.limit)
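A hypothetical invocation of the script above - the original file name is not
preserved in this diff, so ``graph.py`` is assumed - run inside a synapse
virtualenv so that ``synapse.events`` is importable, with ``events.json``
holding one event per line::

    $ pip install pydot simplejson
    $ python graph.py -p graph_output -l 100 events.json '!someroom:example.com'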

@@ -1,93 +0,0 @@

#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib

def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template

def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {
            "$HS": hs,
            "$ROOM": room_id,
            "$TOKEN": access_token
        }
    )
    print "Getting room state => %s" % room_state_url
    res = requests.get(room_state_url)
    print "HTTP %s" % res.status_code
    state_events = res.json()
    if "error" in state_events:
        print "FATAL"
        print state_events
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print "No user IDs match the prefix '%s'" % user_id_prefix
        return

    print "The following user IDs will be kicked from %s" % room_name
    for uid in kick_list:
        print uid
    doit = raw_input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == 'y':
        print "Kicking members..."
        # encode them all
        kick_list = [urllib.quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {
                    "$HS": hs,
                    "$UID": uid,
                    "$ROOM": room_id,
                    "$TOKEN": access_token
                }
            )
            kick_body = {
                "membership": "leave",
                "reason": why
            }
            print "Kicking %s" % uid
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print "ERROR: HTTP %s" % res.status_code
                if res.json().get("error"):
                    print "ERROR: JSON %s" % res.json()



if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t", "--token", help="Your access_token")
    parser.add_argument("-r", "--room", help="The room ID to kick members in")
    parser.add_argument("-s", "--homeserver", help="The base HS url e.g. http://matrix.org")
    parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)
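A hypothetical run of the script above, again assuming a file name
(``kick_users.py``) that this diff does not preserve::

    $ pip install requests
    $ python kick_users.py -u '@irc_' -r '!someroom:matrix.org' \
          -t YOUR_ACCESS_TOKEN -s https://matrix.org -w "Cleaning up IRC ghosts"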

@@ -1,25 +0,0 @@

version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
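Note that the ``systemd.journal.JournalHandler`` used above comes from the
systemd Python bindings, which are not installed by default. A sketch of
installing them from PyPI (the package name ``systemd-python`` is an
assumption - your distribution may ship the bindings as a system package
instead)::

    $ sudo pip install systemd-python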

@@ -1,17 +0,0 @@

# This assumes that Synapse has been installed as a system package
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

[Unit]
Description=Synapse Matrix homeserver

[Service]
Type=simple
User=synapse
Group=synapse
EnvironmentFile=-/etc/sysconfig/synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml

[Install]
WantedBy=multi-user.target
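Once the unit file is installed (e.g. as ``/etc/systemd/system/synapse.service``
- the exact path depends on your distribution), it can be enabled and started
in the usual way::

    sudo systemctl daemon-reload
    sudo systemctl enable synapse.service
    sudo systemctl start synapse.service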

@@ -126,26 +126,12 @@ sub on_unknown_event

    if (!$bridgestate->{$room_id}->{gathered_candidates}) {
        $bridgestate->{$room_id}->{gathered_candidates} = 1;
        my $offer = $bridgestate->{$room_id}->{offer};
        my $candidate_block = {
            audio => '',
            video => '',
        };
        my $candidate_block = "";
        foreach (@{$event->{content}->{candidates}}) {
            if ($_->{sdpMid}) {
                $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
            }
            else {
                $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
                $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
            }
            $candidate_block .= "a=" . $_->{candidate} . "\r\n";
        }

        # XXX: assumes audio comes first
        #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
        #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;

        $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
        $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
        # XXX: collate using the right m= line - for now assume audio call
        $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;

        my $f = send_verto_json_request("verto.invite", {
            "sdp" => $offer,
@@ -186,18 +172,23 @@ sub on_room_message
    warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
}

my $verto_connecting = $loop->new_future;
$bot_verto->connect(
    %{ $CONFIG{"verto-bot"} },
    on_connected => sub {
        warn("[Verto] connected to websocket");
        $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
    },
    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);

Future->needs_all(
    $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
        $bot_matrix->start;
    }),

    $bot_verto->connect(
        %{ $CONFIG{"verto-bot"} },
        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
    )->on_done( sub {
        warn("[Verto] connected to websocket");
    }),
    $verto_connecting,
)->get;

$loop->attach_signal(

@@ -86,7 +86,7 @@ sub create_virtual_user
        "user": "$localpart"
    }
EOT
    )->get;
    )->get;
    warn $response->as_string if ($response->code != 200);
}

@@ -266,21 +266,17 @@ my $as_url = $CONFIG{"matrix-bot"}->{as_url};

Future->needs_all(
    $http->do_request(
        method => "POST",
        uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
        content_type => "application/json",
        content => <<EOT
    method => "POST",
    uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
    content_type => "application/json",
    content => <<EOT
{
    "as_token": "$as_token",
    "url": "$as_url",
    "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
    "namespaces": { "users": ["\@\\\\+.*"] }
}
EOT
    )->then( sub{
        my ($response) = (@_);
        warn $response->as_string if ($response->code != 200);
        return Future->done;
    }),
    ),
    $verto_connecting,
)->get;


@@ -7,9 +7,6 @@ matrix:

matrix-bot:
    user_id: '@vertobot:matrix.org'
    password: ''
    domain: 'matrix.org'
    as_url: 'http://localhost:8009'
    as_token: 'vertobot123'

verto-bot:
    host: webrtc.freeswitch.org

@@ -11,4 +11,7 @@ requires 'YAML', 0;

requires 'JSON', 0;
requires 'Getopt::Long', 0;

on 'test' => sub {
    requires 'Test::More', '>= 0.98';
};


@@ -11,9 +11,7 @@ if [ -f $PID_FILE ]; then

    exit 1
fi

for port in 8080 8081 8082; do
    rm -rf $DIR/$port
    rm -rf $DIR/media_store.$port
done
find "$DIR" -name "*.log" -delete
find "$DIR" -name "*.db" -delete

rm -rf $DIR/etc

@@ -8,49 +8,37 @@ cd "$DIR/.."

mkdir -p demo/etc

export PYTHONPATH=$(readlink -f $(pwd))


echo $PYTHONPATH
# Check the --no-rate-limit param
PARAMS=""
if [ $# -eq 1 ]; then
    if [ $1 = "--no-rate-limit" ]; then
        PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
    fi
fi

for port in 8080 8081 8082; do
    echo "Starting server on port $port... "

    https_port=$((port + 400))
    mkdir -p demo/$port
    pushd demo/$port

    #rm $DIR/etc/$port.config
    python -m synapse.app.homeserver \
        --generate-config \
        --config-path "demo/etc/$port.config" \
        -p "$https_port" \
        --unsecure-port "$port" \
        -H "localhost:$https_port" \
        --config-path "$DIR/etc/$port.config" \
        --report-stats no

    # Check script parameters
    if [ $# -eq 1 ]; then
        if [ $1 = "--no-rate-limit" ]; then
            # Set high limits in config file to disable rate limiting
            perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
            perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
        fi
    fi

    perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config

    if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
        echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
    fi
    if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
        echo "report_stats: false" >> $DIR/etc/$port.config
    fi
        -f "$DIR/$port.log" \
        -d "$DIR/$port.db" \
        -D --pid-file "$DIR/$port.pid" \
        --manhole $((port + 1000)) \
        --tls-dh-params-path "demo/demo.tls.dh" \
        --media-store-path "demo/media_store.$port" \
        $PARAMS $SYNAPSE_PARAMS \

    python -m synapse.app.homeserver \
        --config-path "$DIR/etc/$port.config" \
        -D \
        --config-path "demo/etc/$port.config" \
        -vv \

    popd
done

cd "$CWD"

@@ -1,31 +0,0 @@

Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.

Getting keys
------------
Requires a public/private key pair from:

https://developers.google.com/recaptcha/


Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value:

    recaptcha_public_key: YOUR_PUBLIC_KEY
    recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via:

    enable_registration_captcha: true

Configuring IP used for auth
----------------------------
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured as an option on the home server like so:

    captcha_ip_origin_is_x_forwarded: true

@@ -1,35 +0,0 @@

Registering an Application Service
==================================

The registration of new application services depends on the homeserver used.
In synapse, you need to create a new configuration file for your AS and add it
to the list specified under the ``app_service_config_files`` config
option in your synapse config.

For example:

.. code-block:: yaml

    app_service_config_files:
    - /home/matrix/.synapse/<your-AS>.yaml


The format of the AS configuration file is as follows:

.. code-block:: yaml

    url: <base url of AS>
    as_token: <token AS will add to requests to HS>
    hs_token: <token HS will add to requests to AS>
    sender_localpart: <localpart of AS user>
    namespaces:
      users:  # List of users we're interested in
        - exclusive: <bool>
          regex: <regex>
        - ...
      aliases: []  # List of aliases we're interested in
      rooms: []  # List of room ids we're interested in

See the spec_ for further details on how application services work.

.. _spec: https://matrix.org/docs/spec/application_service/unstable.html

@@ -43,10 +43,7 @@ Basically, PEP8

  together, or want to deliberately extend or preserve vertical/horizontal
  space)

Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
This is so that we can generate documentation with
`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
in the sphinx documentation.
Comments should follow the google code style. This is so that we can generate
documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)

Code should pass pep8 --max-line-length=100 without any warnings.
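For example, a sketch of running that check locally from the root of a synapse
checkout::

    pip install pep8
    pep8 --max-line-length=100 synapse/ tests/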

@@ -1,10 +0,0 @@

What do I do about "Unexpected logging context" debug log-lines everywhere?

<Mjark> The logging context lives in thread local storage
<Mjark> Sometimes it gets out of sync with what it should actually be, usually because something scheduled something to run on the reactor without preserving the logging context.
<Matthew> what is the impact of it getting out of sync? and how and when should we preserve log context?
<Mjark> The impact is that some of the CPU and database metrics will be under-reported, and some log lines will be mis-attributed.
<Mjark> It should happen auto-magically in all the APIs that do IO or otherwise defer to the reactor.
<Erik> Mjark: the other place is if we branch, e.g. using defer.gatherResults

Unanswered: how and when should we preserve log context?

@@ -1,50 +0,0 @@

How to monitor Synapse metrics using Prometheus
===============================================

1: Install prometheus:
   Follow instructions at http://prometheus.io/docs/introduction/install/

2: Enable synapse metrics:
   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:

   Add to homeserver.yaml::

       metrics_port: 9092

   Restart synapse.

3: Check out synapse-prometheus-config:
   https://github.com/matrix-org/synapse-prometheus-config

4: Add ``synapse.html`` and ``synapse.rules``:
   The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
   and the ``.rules`` file needs to be invoked somewhere in the main config
   file. A symlink to each from the git checkout into the prometheus directory
   might be easiest to ensure ``git pull`` keeps it updated.

5: Add a prometheus target for synapse:
   This is easiest if prometheus runs on the same machine as synapse, as it can
   then just use localhost::

       global: {
           rule_file: "synapse.rules"
       }

       job: {
           name: "synapse"

           target_group: {
               target: "http://localhost:9092/"
           }
       }

6: Start prometheus::

       ./prometheus -config.file=prometheus.conf

7: Wait a few seconds for it to start and perform the first scrape,
   then visit the console:

   http://server-where-prometheus-runs:9090/consoles/synapse.html
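If the console stays empty, it may help to first check that the metrics port
from step 2 is actually serving data (a sketch; ``/_synapse/metrics`` is the
path synapse of this vintage mounts its metrics resource on)::

    curl -s http://localhost:9092/_synapse/metrics | head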

@@ -1,120 +0,0 @@

Using Postgres
--------------

Set up database
===============

The PostgreSQL database used *must* have the correct encoding set, otherwise
it would not be able to store UTF8 strings. To create a database with the
correct encoding use, e.g.::

    CREATE DATABASE synapse
        ENCODING 'UTF8'
        LC_COLLATE='C'
        LC_CTYPE='C'
        template=template0
        OWNER synapse_user;

This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
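One way to create that user, assuming a stock PostgreSQL install with an
administrative ``postgres`` account::

    sudo -u postgres createuser --pwprompt synapse_user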

Set up client in Debian/Ubuntu
==============================

Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::

    sudo apt-get install libpq-dev
    pip install psycopg2

Set up client in RHEL/CentOs 7
==============================

Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
`here <https://wiki.postgresql.org/wiki/YUM_Installation>`_.

As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::

    sudo yum install postgresql-devel libpqxx-devel.x86_64
    export PATH=/usr/pgsql-9.4/bin/:$PATH
    pip install psycopg2

Synapse config
==============

When you are ready to start using PostgreSQL, add the following line to your
config file::

    database:
        name: psycopg2
        args:
            user: <user>
            password: <pass>
            database: <db>
            host: <host>
            cp_min: 5
            cp_max: 10

All key/value pairs in ``args`` are passed to the ``psycopg2.connect(..)``
function, except keys beginning with ``cp_``, which are consumed by the twisted
adbapi connection pool.


Porting from SQLite
===================

Overview
~~~~~~~~

The script ``synapse_port_db`` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two-phase process:

1. Copy the existing SQLite database to a separate location (while the server
   is down) and run the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that has come
   in since taking the first snapshot. Restart the server against the
   PostgreSQL database.

The port script is designed to be run repeatedly against newer snapshots of the
SQLite database file. This makes it safe to repeat step 1 if there was a delay
between taking the previous snapshot and being ready to do step 2.

It is safe to kill the port script at any time and restart it.

Using the port script
~~~~~~~~~~~~~~~~~~~~~

Firstly, shut down the currently running synapse server and copy its database
file (typically ``homeserver.db``) to another location. Once the copy is
complete, restart synapse. For instance::

    ./synctl stop
    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml

The flag ``--curses`` displays a coloured curses progress UI.

If the script took a long time to complete, or time has otherwise passed since
the original snapshot was taken, repeat the previous steps with a newer
snapshot.

To complete the conversion shut down the synapse server and run the port
script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config database_config.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file using the ``database_config`` parameter (see
`Synapse Config`_) and restart synapse. Synapse should now be running against
PostgreSQL.

@@ -1,58 +0,0 @@

Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database, both for assigning unique identifiers when inserting into tables,
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication API
~~~~~~~~~~~~~~~~~~~

Synapse will optionally expose a long poll HTTP API for extracting updates. The
API will have a similar shape to /sync in that clients provide tokens
indicating where in the log they have reached and a timeout. The synapse server
then either responds with updates immediately if it already has updates or it
waits until the timeout for more updates. If the timeout expires and nothing
happened then the server returns an empty response.

However unlike the /sync API this replication API is returning synapse specific
data rather than trying to implement a matrix specification. The replication
results are returned as arrays of rows where the rows are mostly lifted
directly from the database. This avoids unnecessary JSON parsing on the server
and hopefully avoids an impedance mismatch between the data returned and the
required updates to the datastore.

This does not replicate all the database tables as many of the database tables
are indexes that can be recovered from the contents of other tables.

The format and parameters for the api are documented in
``synapse/replication/resource.py``.
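As a purely hypothetical sketch of what such a long poll could look like - the
real mount point and parameter names live in
``synapse/replication/resource.py``, not here - a reader asking for events
after position 0 with a 30s timeout might issue::

    curl 'http://localhost:8008/_synapse/replication?events=0&timeout=30000'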
|
||||
|
||||
|
||||
The Slaved DataStore
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are read-only version of the synapse storage layer in
|
||||
``synapse/replication/slave/storage`` that use the response of the replication
|
||||
API to invalidate their caches.
|
||||
@@ -9,35 +9,31 @@ the Home Server to generate credentials that are valid for use on the TURN
|
||||
server through the use of a secret shared between the Home Server and the
|
||||
TURN server.
|
||||
|
||||
This document describes how to install coturn
|
||||
(https://github.com/coturn/coturn) which also supports the TURN REST API,
|
||||
This document described how to install coturn
|
||||
(https://code.google.com/p/coturn/) which also supports the TURN REST API,
|
||||
and integrate it with synapse.
|
||||
|
||||
coturn Setup
|
||||
============
|
||||
|
||||
You may be able to set up coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
|
||||
|
||||
1. Check out coturn::
|
||||
|
||||
git clone https://github.com/coturn/coturn.git coturn
|
||||
svn checkout http://coturn.googlecode.com/svn/trunk/ coturn
|
||||
cd coturn
|
||||
|
||||
2. Configure it::
|
||||
|
||||
./configure
|
||||
|
||||
You may need to install ``libevent2``: if so, you should do so
|
||||
You may need to install libevent2: if so, you should do so
|
||||
in the way recommended by your operating system.
|
||||
You can ignore warnings about lack of database support: a
|
||||
database is unnecessary for this purpose.
|
||||
|
||||
3. Build and install it::
|
||||
|
||||
make
|
||||
make install
|
||||
|
||||
4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
|
||||
4. Make a config file in /etc/turnserver.conf. You can customise
|
||||
a config file from turnserver.conf.default. The relevant
|
||||
lines, with example values, are::
|
||||
|
||||
lt-cred-mech
|
||||
@@ -45,7 +41,7 @@ You may be able to setup coturn via your package manager, or set it up manually
|
||||
static-auth-secret=[your secret key here]
|
||||
realm=turn.myserver.org
|
||||
|
||||
See turnserver.conf for explanations of the options.
|
||||
See turnserver.conf.default for explanations of the options.
|
||||
One way to generate the static-auth-secret is with pwgen::
|
||||
|
||||
pwgen -s 64 1
|
||||
@@ -58,7 +54,6 @@ You may be able to setup coturn via your package manager, or set it up manually
|
||||
import your private key and certificate.
|
||||
|
||||
7. Start the turn server::
|
||||
|
||||
bin/turnserver -o
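For reference, the credential scheme that the TURN REST API expects can be
sketched in a few lines of Python. This assumes the standard construction
(an expiry timestamp in the username, and an HMAC-SHA1 of the username under
the shared ``static-auth-secret`` as the password); consult the coturn
documentation for the authoritative details::

    import base64
    import hashlib
    import hmac
    import time

    def turn_credentials(shared_secret, user_id, ttl=86400):
        # The username encodes when the credential expires; the TURN server
        # recomputes the HMAC to verify the password without a database.
        expiry = int(time.time()) + ttl
        username = "%d:%s" % (expiry, user_id)
        mac = hmac.new(shared_secret, username, hashlib.sha1)
        return username, base64.b64encode(mac.digest())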
|
||||
|
||||
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
URL Previews
|
||||
============
|
||||
|
||||
Design notes on a URL previewing service for Matrix:
|
||||
|
||||
Options are:
|
||||
|
||||
1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
|
||||
* Pros:
|
||||
* Decouples the implementation entirely from Synapse.
|
||||
* Uses existing Matrix events & content repo to store the metadata.
|
||||
* Cons:
|
||||
* Which AS should provide this service for a room, and why should you trust it?
|
||||
* Doesn't work well with E2E; you'd have to cut the AS into every room
|
||||
* the AS would end up subscribing to every room anyway.
|
||||
|
||||
2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
|
||||
* Pros:
|
||||
* Simple and flexible; can be used by any clients at any point
|
||||
* Cons:
|
||||
* If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
|
||||
* We need somewhere to store the URL metadata rather than just using Matrix itself
|
||||
* We can't piggyback on matrix to distribute the metadata between HSes.
|
||||
|
||||
3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
|
||||
* Pros:
|
||||
* Works transparently for all clients
|
||||
* Piggy-backs nicely on using Matrix for distributing the metadata.
|
||||
* No confusion as to which AS
|
||||
* Cons:
|
||||
* Doesn't work with E2E
|
||||
* We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.
|
||||
|
||||
4. Make the sending client use the preview API and insert the event itself when successful.
|
||||
* Pros:
|
||||
* Works well with E2E
|
||||
* No custom server functionality
|
||||
* Lets the client customise the preview that they send (like on FB)
|
||||
* Cons:
|
||||
* Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.
|
||||
|
||||
5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.
|
||||
|
||||
Best solution is probably a combination of both 2 and 4.
|
||||
* Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
|
||||
* Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)
|
||||
|
||||
This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"
|
||||
|
||||
This is also tantamount to senders calculating their own thumbnails for sending in advance of the main content: we are trusting the sender not to lie about the content in the thumbnail, whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.
|
||||
|
||||
However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.
|
||||
|
||||
As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.
|
||||
|
||||
API
|
||||
---
|
||||
|
||||
GET /_matrix/media/r0/preview_url?url=http://wherever.com
|
||||
200 OK
|
||||
{
    "og:type" : "article",
    "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672",
    "og:title" : "Matrix on Twitter",
    "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png",
    "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”",
    "og:site_name" : "Twitter"
}
|
||||
|
||||
* Downloads the URL
|
||||
* If HTML, just stores it in RAM and parses it for OG meta tags
|
||||
* Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
|
||||
* If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
|
||||
* Otherwise, don't bother downloading further.
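A sketch of exercising the endpoint above from Python; the homeserver name
here is a placeholder, and depending on server configuration an access token
may also be required::

    import requests

    resp = requests.get(
        "https://myserver.example/_matrix/media/r0/preview_url",
        params={"url": "https://twitter.com/matrixdotorg/status/684074366691356672"},
    )
    og = resp.json()
    print og.get("og:title")  # e.g. "Matrix on Twitter"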
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install psycopg2
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .dendron-base ]]; then
|
||||
git clone https://github.com/matrix-org/dendron.git .dendron-base --mirror
|
||||
else
|
||||
(cd .dendron-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf dendron
|
||||
git clone .dendron-base dendron --shared
|
||||
cd dendron
|
||||
|
||||
: ${GOPATH:=${WORKSPACE}/.gopath}
|
||||
if [[ "${GOPATH}" != *:* ]]; then
|
||||
mkdir -p "${GOPATH}"
|
||||
export PATH="${GOPATH}/bin:${PATH}"
|
||||
fi
|
||||
export GOPATH
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
go get github.com/constabulary/gb/...
|
||||
gb generate
|
||||
gb build
|
||||
|
||||
cd ..
|
||||
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
: ${PORT_COUNT=20}
|
||||
|
||||
./jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
mkdir -p var
|
||||
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
./jenkins/install_and_run.sh --python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--dendron $WORKSPACE/dendron/bin/dendron \
|
||||
--pusher \
|
||||
--synchrotron \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
|
||||
|
||||
cd ..
|
||||
@@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox -e packaging -e pep8
|
||||
@@ -1,64 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install psycopg2
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
: ${PORT_COUNT=20}
|
||||
|
||||
./jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
@@ -1,58 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_COUNT=20}
|
||||
: ${PORT_BASE:=8000}
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
@@ -1,29 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
|
||||
tox -e py27
|
||||
@@ -1,7 +0,0 @@
|
||||
.header {
|
||||
border-bottom: 4px solid #e4f7ed ! important;
|
||||
}
|
||||
|
||||
.notif_link a, .footer a {
|
||||
color: #76CFA6 ! important;
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
body {
|
||||
margin: 0px;
|
||||
}
|
||||
|
||||
pre, code {
|
||||
word-break: break-word;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
|
||||
#page {
|
||||
font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
|
||||
font-color: #454545;
|
||||
font-size: 12pt;
|
||||
width: 100%;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
#inner {
|
||||
width: 640px;
|
||||
}
|
||||
|
||||
.header {
|
||||
width: 100%;
|
||||
height: 87px;
|
||||
color: #454545;
|
||||
border-bottom: 4px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.logo {
|
||||
text-align: right;
|
||||
margin-left: 20px;
|
||||
}
|
||||
|
||||
.salutation {
|
||||
padding-top: 10px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.summarytext {
|
||||
}
|
||||
|
||||
.room {
|
||||
width: 100%;
|
||||
color: #454545;
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.room_header td {
|
||||
padding-top: 38px;
|
||||
padding-bottom: 10px;
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.room_name {
|
||||
vertical-align: middle;
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.room_header h2 {
|
||||
margin-top: 0px;
|
||||
margin-left: 75px;
|
||||
font-size: 20px;
|
||||
}
|
||||
|
||||
.room_avatar {
|
||||
width: 56px;
|
||||
line-height: 0px;
|
||||
text-align: center;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.room_avatar img {
|
||||
width: 48px;
|
||||
height: 48px;
|
||||
object-fit: cover;
|
||||
border-radius: 24px;
|
||||
}
|
||||
|
||||
.notif {
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
margin-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
}
|
||||
|
||||
.historical_message .sender_avatar {
|
||||
opacity: 0.3;
|
||||
}
|
||||
|
||||
/* spell out opacity and historical_message class names for Outlook aka Word */
|
||||
.historical_message .sender_name {
|
||||
color: #e3e3e3;
|
||||
}
|
||||
|
||||
.historical_message .message_time {
|
||||
color: #e3e3e3;
|
||||
}
|
||||
|
||||
.historical_message .message_body {
|
||||
color: #c7c7c7;
|
||||
}
|
||||
|
||||
.historical_message td,
|
||||
.message td {
|
||||
padding-top: 10px;
|
||||
}
|
||||
|
||||
.sender_avatar {
|
||||
width: 56px;
|
||||
text-align: center;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.sender_avatar img {
|
||||
margin-top: -2px;
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
border-radius: 16px;
|
||||
}
|
||||
|
||||
.sender_name {
|
||||
display: inline;
|
||||
font-size: 13px;
|
||||
color: #a2a2a2;
|
||||
}
|
||||
|
||||
.message_time {
|
||||
text-align: right;
|
||||
width: 100px;
|
||||
font-size: 11px;
|
||||
color: #a2a2a2;
|
||||
}
|
||||
|
||||
.message_body {
|
||||
}
|
||||
|
||||
.notif_link td {
|
||||
padding-top: 10px;
|
||||
padding-bottom: 10px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.notif_link a, .footer a {
|
||||
color: #454545;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.debug {
|
||||
font-size: 10px;
|
||||
color: #888;
|
||||
}
|
||||
|
||||
.footer {
|
||||
margin-top: 20px;
|
||||
text-align: center;
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
{% for message in notif.messages %}
|
||||
<tr class="{{ "historical_message" if message.is_historical else "message" }}">
|
||||
<td class="sender_avatar">
|
||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
||||
{% if message.sender_avatar_url %}
|
||||
<img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
|
||||
{% else %}
|
||||
{% if message.sender_hash % 3 == 0 %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
|
||||
{% elif message.sender_hash % 3 == 1 %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
|
||||
{% else %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="message_contents">
|
||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
||||
<div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
|
||||
{% endif %}
|
||||
<div class="message_body">
|
||||
{% if message.msgtype == "m.text" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.emote" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.notice" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.image" %}
|
||||
<img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
|
||||
{% elif message.msgtype == "m.file" %}
|
||||
<span class="filename">{{ message.body_text_plain }}</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</td>
|
||||
<td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
<tr class="notif_link">
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="{{ notif.link }}">View {{ room.title }}</a>
|
||||
</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
@@ -1,16 +0,0 @@
|
||||
{% for message in notif.messages %}
|
||||
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
|
||||
{% if message.msgtype == "m.text" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.emote" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.notice" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.image" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.file" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
View {{ room.title }} at {{ notif.link }}
|
||||
@@ -1,53 +0,0 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<style type="text/css">
|
||||
{% include 'mail.css' without context %}
|
||||
{% include "mail-%s.css" % app_name ignore missing without context %}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table id="page">
|
||||
<tr>
|
||||
<td> </td>
|
||||
<td id="inner">
|
||||
<table class="header">
|
||||
<tr>
|
||||
<td>
|
||||
<div class="salutation">Hi {{ user_display_name }},</div>
|
||||
<div class="summarytext">{{ summary_text }}</div>
|
||||
</td>
|
||||
<td class="logo">
|
||||
{% if app_name == "Vector" %}
|
||||
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
|
||||
{% else %}
|
||||
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
{% for room in rooms %}
|
||||
{% include 'room.html' with context %}
|
||||
{% endfor %}
|
||||
<div class="footer">
|
||||
<a href="{{ unsubscribe_link }}">Unsubscribe</a>
|
||||
<br/>
|
||||
<br/>
|
||||
<div class="debug">
|
||||
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
|
||||
an event was received at {{ reason.received_at|format_ts("%c") }}
|
||||
which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
|
||||
{% if reason.last_sent_ts %}
|
||||
and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
|
||||
which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
|
||||
{% else %}
|
||||
and we don't have a last time we sent a mail for this room.
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td> </td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,10 +0,0 @@
|
||||
Hi {{ user_display_name }},
|
||||
|
||||
{{ summary_text }}
|
||||
|
||||
{% for room in rooms %}
|
||||
{% include 'room.txt' with context %}
|
||||
{% endfor %}
|
||||
|
||||
You can disable these notifications at {{ unsubscribe_link }}
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
<table class="room">
|
||||
<tr class="room_header">
|
||||
<td class="room_avatar">
|
||||
{% if room.avatar_url %}
|
||||
<img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
|
||||
{% else %}
|
||||
{% if room.hash % 3 == 0 %}
|
||||
<img alt="" src="https://vector.im/beta/img/76cfa6.png" />
|
||||
{% elif room.hash % 3 == 1 %}
|
||||
<img alt="" src="https://vector.im/beta/img/50e2c2.png" />
|
||||
{% else %}
|
||||
<img alt="" src="https://vector.im/beta/img/f4c371.png" />
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="room_name" colspan="2">
|
||||
{{ room.title }}
|
||||
</td>
|
||||
</tr>
|
||||
{% if room.invite %}
|
||||
<tr>
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="{{ room.link }}">Join the conversation.</a>
|
||||
</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
{% else %}
|
||||
{% for notif in room.notifs %}
|
||||
{% include 'notif.html' with context %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</table>
|
||||
@@ -1,9 +0,0 @@
|
||||
{{ room.title }}
|
||||
|
||||
{% if room.invite %}
|
||||
You've been invited, join at {{ room.link }}
|
||||
{% else %}
|
||||
{% for notif in room.notifs %}
|
||||
{% include 'notif.txt' with context %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -1,116 +0,0 @@
|
||||
import psycopg2
|
||||
import yaml
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import hashlib
|
||||
from unpaddedbase64 import encode_base64
|
||||
from signedjson.key import read_signing_keys
|
||||
from signedjson.sign import sign_json
|
||||
from canonicaljson import encode_canonical_json
|
||||
|
||||
|
||||
def select_v1_keys(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, key_id, verify_key in rows:
|
||||
results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
|
||||
return results
|
||||
|
||||
|
||||
def select_v1_certs(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, tls_certificate in rows:
|
||||
results[server_name] = tls_certificate
|
||||
return results
|
||||
|
||||
|
||||
def select_v2_json(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, key_id, key_json in rows:
|
||||
results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
|
||||
return results
|
||||
|
||||
|
||||
def convert_v1_to_v2(server_name, valid_until, keys, certificate):
|
||||
return {
|
||||
"old_verify_keys": {},
|
||||
"server_name": server_name,
|
||||
"verify_keys": {
|
||||
key_id: {"key": key}
|
||||
for key_id, key in keys.items()
|
||||
},
|
||||
"valid_until_ts": valid_until,
|
||||
"tls_fingerprints": [fingerprint(certificate)],
|
||||
}
|
||||
|
||||
|
||||
def fingerprint(certificate):
|
||||
finger = hashlib.sha256(certificate)
|
||||
return {"sha256": encode_base64(finger.digest())}
|
||||
|
||||
|
||||
def rows_v2(server, json):
|
||||
valid_until = json["valid_until_ts"]
|
||||
key_json = encode_canonical_json(json)
|
||||
for key_id in json["verify_keys"]:
|
||||
yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
|
||||
|
||||
|
||||
def main():
|
||||
config = yaml.load(open(sys.argv[1]))
|
||||
valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
|
||||
|
||||
server_name = config["server_name"]
|
||||
signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
|
||||
|
||||
database = config["database"]
|
||||
assert database["name"] == "psycopg2", "Can only convert for postgresql"
|
||||
args = database["args"]
|
||||
args.pop("cp_max")
|
||||
args.pop("cp_min")
|
||||
connection = psycopg2.connect(**args)
|
||||
keys = select_v1_keys(connection)
|
||||
certificates = select_v1_certs(connection)
|
||||
json = select_v2_json(connection)
|
||||
|
||||
result = {}
|
||||
for server in keys:
|
||||
if server not in json:
|
||||
v2_json = convert_v1_to_v2(
|
||||
server, valid_until, keys[server], certificates[server]
|
||||
)
|
||||
v2_json = sign_json(v2_json, server_name, signing_key)
|
||||
result[server] = v2_json
|
||||
|
||||
yaml.safe_dump(result, sys.stdout, default_flow_style=False)
|
||||
|
||||
rows = list(
|
||||
row for server, json in result.items()
|
||||
for row in rows_v2(server, json)
|
||||
)
|
||||
|
||||
cursor = connection.cursor()
|
||||
cursor.executemany(
|
||||
"INSERT INTO server_keys_json ("
|
||||
" server_name, key_id, from_server,"
|
||||
" ts_added_ms, ts_valid_until_ms, key_json"
|
||||
") VALUES (%s, %s, %s, %s, %s, %s)",
|
||||
rows
|
||||
)
|
||||
connection.commit()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,196 +0,0 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import ast
|
||||
import yaml
|
||||
|
||||
class DefinitionVisitor(ast.NodeVisitor):
|
||||
def __init__(self):
|
||||
super(DefinitionVisitor, self).__init__()
|
||||
self.functions = {}
|
||||
self.classes = {}
|
||||
self.names = {}
|
||||
self.attrs = set()
|
||||
self.definitions = {
|
||||
'def': self.functions,
|
||||
'class': self.classes,
|
||||
'names': self.names,
|
||||
'attrs': self.attrs,
|
||||
}
|
||||
|
||||
def visit_Name(self, node):
|
||||
self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
|
||||
|
||||
def visit_Attribute(self, node):
|
||||
self.attrs.add(node.attr)
|
||||
for child in ast.iter_child_nodes(node):
|
||||
self.visit(child)
|
||||
|
||||
def visit_ClassDef(self, node):
|
||||
visitor = DefinitionVisitor()
|
||||
self.classes[node.name] = visitor.definitions
|
||||
for child in ast.iter_child_nodes(node):
|
||||
visitor.visit(child)
|
||||
|
||||
def visit_FunctionDef(self, node):
|
||||
visitor = DefinitionVisitor()
|
||||
self.functions[node.name] = visitor.definitions
|
||||
for child in ast.iter_child_nodes(node):
|
||||
visitor.visit(child)
|
||||
|
||||
|
||||
def non_empty(defs):
|
||||
functions = {name: non_empty(f) for name, f in defs['def'].items()}
|
||||
classes = {name: non_empty(f) for name, f in defs['class'].items()}
|
||||
result = {}
|
||||
if functions: result['def'] = functions
|
||||
if classes: result['class'] = classes
|
||||
names = defs['names']
|
||||
uses = []
|
||||
for name in names.get('Load', ()):
|
||||
if name not in names.get('Param', ()) and name not in names.get('Store', ()):
|
||||
uses.append(name)
|
||||
uses.extend(defs['attrs'])
|
||||
if uses: result['uses'] = uses
|
||||
result['names'] = names
|
||||
result['attrs'] = defs['attrs']
|
||||
return result
|
||||
|
||||
|
||||
def definitions_in_code(input_code):
|
||||
input_ast = ast.parse(input_code)
|
||||
visitor = DefinitionVisitor()
|
||||
visitor.visit(input_ast)
|
||||
definitions = non_empty(visitor.definitions)
|
||||
return definitions
|
||||
|
||||
|
||||
def definitions_in_file(filepath):
|
||||
with open(filepath) as f:
|
||||
return definitions_in_code(f.read())
|
||||
|
||||
|
||||
def defined_names(prefix, defs, names):
|
||||
for name, funcs in defs.get('def', {}).items():
|
||||
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
|
||||
defined_names(prefix + name + ".", funcs, names)
|
||||
|
||||
for name, funcs in defs.get('class', {}).items():
|
||||
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
|
||||
defined_names(prefix + name + ".", funcs, names)
|
||||
|
||||
|
||||
def used_names(prefix, item, defs, names):
|
||||
for name, funcs in defs.get('def', {}).items():
|
||||
used_names(prefix + name + ".", name, funcs, names)
|
||||
|
||||
for name, funcs in defs.get('class', {}).items():
|
||||
used_names(prefix + name + ".", name, funcs, names)
|
||||
|
||||
path = prefix.rstrip('.')
|
||||
for used in defs.get('uses', ()):
|
||||
if used in names:
|
||||
if item:
|
||||
names[item].setdefault('uses', []).append(used)
|
||||
names[used].setdefault('used', {}).setdefault(item, []).append(path)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import sys, os, argparse, re
|
||||
|
||||
parser = argparse.ArgumentParser(description='Find definitions.')
|
||||
parser.add_argument(
|
||||
"--unused", action="store_true", help="Only list unused definitions"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pattern", action="append", metavar="REGEXP",
|
||||
help="Search for a pattern"
|
||||
)
|
||||
parser.add_argument(
|
||||
"directories", nargs='+', metavar="DIR",
|
||||
help="Directories to search for definitions"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--referrers", default=0, type=int,
|
||||
help="Include referrers up to the given depth"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--referred", default=0, type=int,
|
||||
help="Include referred down to the given depth"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--format", default="yaml",
|
||||
help="Output format, one of 'yaml' or 'dot'"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
definitions = {}
|
||||
for directory in args.directories:
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
filepath = os.path.join(root, filename)
|
||||
definitions[filepath] = definitions_in_file(filepath)
|
||||
|
||||
names = {}
|
||||
for filepath, defs in definitions.items():
|
||||
defined_names(filepath + ":", defs, names)
|
||||
|
||||
for filepath, defs in definitions.items():
|
||||
used_names(filepath + ":", None, defs, names)
|
||||
|
||||
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
|
||||
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
|
||||
|
||||
result = {}
|
||||
for name, definition in names.items():
|
||||
if patterns and not any(pattern.match(name) for pattern in patterns):
|
||||
continue
|
||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
||||
continue
|
||||
if args.unused and definition.get('used'):
|
||||
continue
|
||||
result[name] = definition
|
||||
|
||||
referrer_depth = args.referrers
|
||||
referrers = set()
|
||||
while referrer_depth:
|
||||
referrer_depth -= 1
|
||||
for entry in result.values():
|
||||
for used_by in entry.get("used", ()):
|
||||
referrers.add(used_by)
|
||||
for name, definition in names.items():
|
||||
if name not in referrers:
|
||||
continue
|
||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
||||
continue
|
||||
result[name] = definition
|
||||
|
||||
referred_depth = args.referred
|
||||
referred = set()
|
||||
while referred_depth:
|
||||
referred_depth -= 1
|
||||
for entry in result.values():
|
||||
for uses in entry.get("uses", ()):
|
||||
referred.add(uses)
|
||||
for name, definition in names.items():
|
||||
if name not in referred:
|
||||
continue
|
||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
||||
continue
|
||||
result[name] = definition
|
||||
|
||||
if args.format == 'yaml':
|
||||
yaml.dump(result, sys.stdout, default_flow_style=False)
|
||||
elif args.format == 'dot':
|
||||
print "digraph {"
|
||||
for name, entry in result.items():
|
||||
print name
|
||||
for used_by in entry.get("used", ()):
|
||||
if used_by in result:
|
||||
print used_by, "->", name
|
||||
print "}"
|
||||
else:
|
||||
raise ValueError("Unknown format %r" % (args.format))
|
||||
@@ -1,24 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
|
||||
import pymacaroons
|
||||
import sys
|
||||
|
||||
if len(sys.argv) == 1:
|
||||
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
|
||||
sys.exit(1)
|
||||
|
||||
macaroon_string = sys.argv[1]
|
||||
key = sys.argv[2] if len(sys.argv) > 2 else None
|
||||
|
||||
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
|
||||
print macaroon.inspect()
|
||||
|
||||
print ""
|
||||
|
||||
verifier = pymacaroons.Verifier()
|
||||
verifier.satisfy_general(lambda c: True)
|
||||
try:
|
||||
verifier.verify(macaroon, key)
|
||||
print "Signature is correct"
|
||||
except Exception as e:
|
||||
print e.message
|
||||
@@ -1,62 +0,0 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import ast
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
PATTERNS_V1 = []
|
||||
PATTERNS_V2 = []
|
||||
|
||||
RESULT = {
|
||||
"v1": PATTERNS_V1,
|
||||
"v2": PATTERNS_V2,
|
||||
}
|
||||
|
||||
class CallVisitor(ast.NodeVisitor):
|
||||
def visit_Call(self, node):
|
||||
if isinstance(node.func, ast.Name):
|
||||
name = node.func.id
|
||||
else:
|
||||
return
|
||||
|
||||
|
||||
if name == "client_path_patterns":
|
||||
PATTERNS_V1.append(node.args[0].s)
|
||||
elif name == "client_v2_patterns":
|
||||
PATTERNS_V2.append(node.args[0].s)
|
||||
|
||||
|
||||
def find_patterns_in_code(input_code):
|
||||
input_ast = ast.parse(input_code)
|
||||
visitor = CallVisitor()
|
||||
visitor.visit(input_ast)
|
||||
|
||||
|
||||
def find_patterns_in_file(filepath):
|
||||
with open(filepath) as f:
|
||||
find_patterns_in_code(f.read())
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Find url patterns.')
|
||||
|
||||
parser.add_argument(
|
||||
"directories", nargs='+', metavar="DIR",
|
||||
help="Directories to search for definitions"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
for directory in args.directories:
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
filepath = os.path.join(root, filename)
|
||||
find_patterns_in_file(filepath)
|
||||
|
||||
PATTERNS_V1.sort()
|
||||
PATTERNS_V2.sort()
|
||||
|
||||
yaml.dump(RESULT, sys.stdout, default_flow_style=False)
|
||||
@@ -1,67 +0,0 @@
|
||||
import requests
|
||||
import collections
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
|
||||
Entry = collections.namedtuple("Entry", "name position rows")
|
||||
|
||||
ROW_TYPES = {}
|
||||
|
||||
|
||||
def row_type_for_columns(name, column_names):
|
||||
column_names = tuple(column_names)
|
||||
row_type = ROW_TYPES.get((name, column_names))
|
||||
if row_type is None:
|
||||
row_type = collections.namedtuple(name, column_names)
|
||||
ROW_TYPES[(name, column_names)] = row_type
|
||||
return row_type
|
||||
|
||||
|
||||
def parse_response(content):
|
||||
streams = json.loads(content)
|
||||
result = {}
|
||||
for name, value in streams.items():
|
||||
row_type = row_type_for_columns(name, value["field_names"])
|
||||
position = value["position"]
|
||||
rows = [row_type(*row) for row in value["rows"]]
|
||||
result[name] = Entry(name, position, rows)
|
||||
return result
|
||||
|
||||
|
||||
def replicate(server, streams):
|
||||
return parse_response(requests.get(
|
||||
server + "/_synapse/replication",
|
||||
verify=False,
|
||||
params=streams
|
||||
).content)
|
||||
|
||||
|
||||
def main():
|
||||
server = sys.argv[1]
|
||||
|
||||
streams = None
|
||||
while not streams:
|
||||
try:
|
||||
streams = {
|
||||
row.name: row.position
|
||||
for row in replicate(server, {"streams":"-1"})["streams"].rows
|
||||
}
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
time.sleep(0.1)
|
||||
|
||||
while True:
|
||||
try:
|
||||
results = replicate(server, streams)
|
||||
except:
|
||||
sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
|
||||
break
|
||||
for update in results.values():
|
||||
for row in update.rows:
|
||||
sys.stdout.write(repr(row) + "\n")
|
||||
streams[update.name] = update.position
|
||||
|
||||
|
||||
|
||||
if __name__=='__main__':
|
||||
main()
|
||||
@@ -56,9 +56,10 @@ if __name__ == '__main__':
|
||||
|
||||
js = json.load(args.json)
|
||||
|
||||
|
||||
auth = Auth(Mock())
|
||||
check_auth(
|
||||
auth,
|
||||
[FrozenEvent(d) for d in js["auth_chain"]],
|
||||
[FrozenEvent(d) for d in js.get("pdus", [])],
|
||||
[FrozenEvent(d) for d in js["pdus"]],
|
||||
)
|
||||
@@ -1,5 +1,5 @@
|
||||
from synapse.crypto.event_signing import *
|
||||
from unpaddedbase64 import encode_base64
|
||||
from syutil.base64util import encode_base64
|
||||
|
||||
import argparse
|
||||
import hashlib
|
||||
@@ -1,7 +1,9 @@
|
||||
|
||||
from signedjson.sign import verify_signed_json
|
||||
from signedjson.key import decode_verify_key_bytes, write_signing_keys
|
||||
from unpaddedbase64 import decode_base64
|
||||
from syutil.crypto.jsonsign import verify_signed_json
|
||||
from syutil.crypto.signing_key import (
|
||||
decode_verify_key_bytes, write_signing_keys
|
||||
)
|
||||
from syutil.base64util import decode_base64
|
||||
|
||||
import urllib2
|
||||
import json
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/perl -pi
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
$copyright = <<EOT;
|
||||
/* Copyright 2016 OpenMarket Ltd
|
||||
/* Copyright 2015 OpenMarket Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/perl -pi
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
$copyright = <<EOT;
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
21
scripts/database-prepare-for-0.0.1.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This will prepare a synapse database for running with v0.0.1 of synapse.
|
||||
# It will store all the user information, but will *delete* all messages and
|
||||
# room data.
|
||||
|
||||
set -e
|
||||
|
||||
cp "$1" "$1.bak"
|
||||
|
||||
DUMP=$(sqlite3 "$1" << 'EOF'
|
||||
.dump users
|
||||
.dump access_tokens
|
||||
.dump presence
|
||||
.dump profiles
|
||||
EOF
|
||||
)
|
||||
|
||||
rm "$1"
|
||||
|
||||
sqlite3 "$1" <<< "$DUMP"
|
||||
21
scripts/database-prepare-for-0.5.0.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This will prepare a synapse database for running with v0.5.0 of synapse.
|
||||
# It will store all the user information, but will *delete* all messages and
|
||||
# room data.
|
||||
|
||||
set -e
|
||||
|
||||
cp "$1" "$1.bak"
|
||||
|
||||
DUMP=$(sqlite3 "$1" << 'EOF'
|
||||
.dump users
|
||||
.dump access_tokens
|
||||
.dump presence
|
||||
.dump profiles
|
||||
EOF
|
||||
)
|
||||
|
||||
rm "$1"
|
||||
|
||||
sqlite3 "$1" <<< "$DUMP"
|
||||
@@ -6,8 +6,8 @@ from synapse.crypto.event_signing import (
|
||||
add_event_pdu_content_hash, compute_pdu_event_reference_hash
|
||||
)
|
||||
from synapse.api.events.utils import prune_pdu
|
||||
from unpaddedbase64 import encode_base64, decode_base64
|
||||
from canonicaljson import encode_canonical_json
|
||||
from syutil.base64util import encode_base64, decode_base64
|
||||
from syutil.jsonutil import encode_canonical_json
|
||||
import sqlite3
|
||||
import sys
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import argparse
|
||||
|
||||
import sys
|
||||
|
||||
import bcrypt
|
||||
import getpass
|
||||
|
||||
import yaml
|
||||
|
||||
bcrypt_rounds=12
|
||||
password_pepper = ""
|
||||
|
||||
def prompt_for_pass():
|
||||
password = getpass.getpass("Password: ")
|
||||
|
||||
if not password:
|
||||
raise Exception("Password cannot be blank.")
|
||||
|
||||
confirm_password = getpass.getpass("Confirm password: ")
|
||||
|
||||
if password != confirm_password:
|
||||
raise Exception("Passwords do not match.")
|
||||
|
||||
return password
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Calculate the hash of a new password, so that passwords"
|
||||
" can be reset")
|
||||
parser.add_argument(
|
||||
"-p", "--password",
|
||||
default=None,
|
||||
help="New password for user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-c", "--config",
|
||||
type=argparse.FileType('r'),
|
||||
help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
if "config" in args and args.config:
|
||||
config = yaml.safe_load(args.config)
|
||||
bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
|
||||
password_config = config.get("password_config", {})
|
||||
password_pepper = password_config.get("pepper", password_pepper)
|
||||
password = args.password
|
||||
|
||||
if not password:
|
||||
password = prompt_for_pass()
|
||||
|
||||
print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import argparse
|
||||
import getpass
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import sys
|
||||
import urllib2
|
||||
import yaml
|
||||
|
||||
|
||||
def request_registration(user, password, server_location, shared_secret, admin=False):
|
||||
mac = hmac.new(
|
||||
key=shared_secret,
|
||||
digestmod=hashlib.sha1,
|
||||
)
|
||||
|
||||
mac.update(user)
|
||||
mac.update("\x00")
|
||||
mac.update(password)
|
||||
mac.update("\x00")
|
||||
mac.update("admin" if admin else "notadmin")
|
||||
|
||||
mac = mac.hexdigest()
|
||||
|
||||
data = {
|
||||
"user": user,
|
||||
"password": password,
|
||||
"mac": mac,
|
||||
"type": "org.matrix.login.shared_secret",
|
||||
"admin": admin,
|
||||
}
|
||||
|
||||
server_location = server_location.rstrip("/")
|
||||
|
||||
print "Sending registration request..."
|
||||
|
||||
req = urllib2.Request(
|
||||
"%s/_matrix/client/api/v1/register" % (server_location,),
|
||||
data=json.dumps(data),
|
||||
headers={'Content-Type': 'application/json'}
|
||||
)
|
||||
try:
|
||||
if sys.version_info[:3] >= (2, 7, 9):
|
||||
# As of version 2.7.9, urllib2 now checks SSL certs
|
||||
import ssl
|
||||
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
|
||||
else:
|
||||
f = urllib2.urlopen(req)
|
||||
f.read()
|
||||
f.close()
|
||||
print "Success."
|
||||
except urllib2.HTTPError as e:
|
||||
print "ERROR! Received %d %s" % (e.code, e.reason,)
|
||||
if 400 <= e.code < 500:
|
||||
if e.info().type == "application/json":
|
||||
resp = json.load(e)
|
||||
if "error" in resp:
|
||||
print resp["error"]
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def register_new_user(user, password, server_location, shared_secret, admin):
|
||||
if not user:
|
||||
try:
|
||||
default_user = getpass.getuser()
|
||||
except:
|
||||
default_user = None
|
||||
|
||||
if default_user:
|
||||
user = raw_input("New user localpart [%s]: " % (default_user,))
|
||||
if not user:
|
||||
user = default_user
|
||||
else:
|
||||
user = raw_input("New user localpart: ")
|
||||
|
||||
if not user:
|
||||
print "Invalid user name"
|
||||
sys.exit(1)
|
||||
|
||||
if not password:
|
||||
password = getpass.getpass("Password: ")
|
||||
|
||||
if not password:
|
||||
print "Password cannot be blank."
|
||||
sys.exit(1)
|
||||
|
||||
confirm_password = getpass.getpass("Confirm password: ")
|
||||
|
||||
if password != confirm_password:
|
||||
print "Passwords do not match"
|
||||
sys.exit(1)
|
||||
|
||||
if not admin:
|
||||
admin = raw_input("Make admin [no]: ")
|
||||
if admin in ("y", "yes", "true"):
|
||||
admin = True
|
||||
else:
|
||||
admin = False
|
||||
|
||||
request_registration(user, password, server_location, shared_secret, bool(admin))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Used to register new users with a given home server when"
|
||||
" registration has been disabled. The home server must be"
|
||||
" configured with the 'registration_shared_secret' option"
|
||||
" set.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-u", "--user",
|
||||
default=None,
|
||||
help="Local part of the new user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p", "--password",
|
||||
default=None,
|
||||
help="New password for user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-a", "--admin",
|
||||
action="store_true",
|
||||
help="Register new user as an admin. Will prompt if omitted.",
|
||||
)
|
||||
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument(
|
||||
"-c", "--config",
|
||||
type=argparse.FileType('r'),
|
||||
help="Path to server config file. Used to read in shared secret.",
|
||||
)
|
||||
|
||||
group.add_argument(
|
||||
"-k", "--shared-secret",
|
||||
help="Shared secret as defined in server config file.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"server_url",
|
||||
default="https://localhost:8448",
|
||||
nargs='?',
|
||||
help="URL to use to talk to the home server. Defaults to "
|
||||
" 'https://localhost:8448'.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if "config" in args and args.config:
|
||||
config = yaml.safe_load(args.config)
|
||||
secret = config.get("registration_shared_secret", None)
|
||||
if not secret:
|
||||
print "No 'registration_shared_secret' defined in config."
|
||||
sys.exit(1)
|
||||
else:
|
||||
secret = args.shared_secret
|
||||
|
||||
register_new_user(args.user, args.password, args.server_url, secret, args.admin)
|
||||
@@ -1,835 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.enterprise import adbapi
|
||||
|
||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
|
||||
import argparse
|
||||
import curses
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import yaml
|
||||
|
||||
|
||||
logger = logging.getLogger("synapse_port_db")
|
||||
|
||||
|
||||
BOOLEAN_COLUMNS = {
|
||||
"events": ["processed", "outlier"],
|
||||
"rooms": ["is_public"],
|
||||
"event_edges": ["is_state"],
|
||||
"presence_list": ["accepted"],
|
||||
"presence_stream": ["currently_active"],
|
||||
}
|
||||
|
||||
|
||||
APPEND_ONLY_TABLES = [
|
||||
"event_content_hashes",
|
||||
"event_reference_hashes",
|
||||
"event_signatures",
|
||||
"event_edge_hashes",
|
||||
"events",
|
||||
"event_json",
|
||||
"state_events",
|
||||
"room_memberships",
|
||||
"feedback",
|
||||
"topics",
|
||||
"room_names",
|
||||
"rooms",
|
||||
"local_media_repository",
|
||||
"local_media_repository_thumbnails",
|
||||
"remote_media_cache",
|
||||
"remote_media_cache_thumbnails",
|
||||
"redactions",
|
||||
"event_edges",
|
||||
"event_auth",
|
||||
"received_transactions",
|
||||
"sent_transactions",
|
||||
"transaction_id_to_pdu",
|
||||
"users",
|
||||
"state_groups",
|
||||
"state_groups_state",
|
||||
"event_to_state_groups",
|
||||
"rejections",
|
||||
"event_search",
|
||||
]
|
||||
|
||||
|
||||
end_error_exec_info = None
|
||||
|
||||
|
||||
class Store(object):
|
||||
"""This object is used to pull out some of the convenience API from the
|
||||
Storage layer.
|
||||
|
||||
*All* database interactions should go through this object.
|
||||
"""
|
||||
def __init__(self, db_pool, engine):
|
||||
self.db_pool = db_pool
|
||||
self.database_engine = engine
|
||||
|
||||
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
|
||||
_simple_insert = SQLBaseStore.__dict__["_simple_insert"]
|
||||
|
||||
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
||||
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
|
||||
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
|
||||
_simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
|
||||
|
||||
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
|
||||
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
|
||||
|
||||
def runInteraction(self, desc, func, *args, **kwargs):
|
||||
def r(conn):
|
||||
try:
|
||||
i = 0
|
||||
N = 5
|
||||
while True:
|
||||
try:
|
||||
txn = conn.cursor()
|
||||
return func(
|
||||
LoggingTransaction(txn, desc, self.database_engine, []),
|
||||
*args, **kwargs
|
||||
)
|
||||
except self.database_engine.module.DatabaseError as e:
|
||||
if self.database_engine.is_deadlock(e):
|
||||
logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
|
||||
if i < N:
|
||||
i += 1
|
||||
conn.rollback()
|
||||
continue
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.debug("[TXN FAIL] {%s} %s", desc, e)
|
||||
raise
|
||||
|
||||
return self.db_pool.runWithConnection(r)
|
||||
|
||||
def execute(self, f, *args, **kwargs):
|
||||
return self.runInteraction(f.__name__, f, *args, **kwargs)
|
||||
|
||||
def execute_sql(self, sql, *args):
|
||||
def r(txn):
|
||||
txn.execute(sql, args)
|
||||
return txn.fetchall()
|
||||
return self.runInteraction("execute_sql", r)
|
||||
|
||||
def insert_many_txn(self, txn, table, headers, rows):
|
||||
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
|
||||
table,
|
||||
", ".join(k for k in headers),
|
||||
", ".join("%s" for _ in headers)
|
||||
)
|
||||
|
||||
try:
|
||||
txn.executemany(sql, rows)
|
||||
except:
|
||||
logger.exception(
|
||||
"Failed to insert: %s",
|
||||
table,
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
class Porter(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @defer.inlineCallbacks
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcol="rowid",
                allow_none=True,
            )

            total_to_port = None
            if next_chunk is None:
                if table == "sent_transactions":
                    next_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={"table_name": table, "rowid": 1}
                    )

                    next_chunk = 1
                    already_ported = 0

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, next_chunk
                )
        else:
            def delete_all(txn):
                txn.execute(
                    "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
                    (table,)
                )
                txn.execute("TRUNCATE %s CASCADE" % (table,))

            yield self.postgres_store.execute(delete_all)

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "rowid": 0}
            )

            next_chunk = 1

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, next_chunk
            )

        defer.returnValue((table, already_ported, total_to_port, next_chunk))

    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, next_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        if table == "event_search":
            yield self.handle_search_table(postgres_size, table_size, next_chunk)
            return

        select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                return headers, rows

            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1

                self._convert_rows(table, headers, rows)

                def insert(txn):
                    self.postgres_store.insert_many_txn(
                        txn, table, headers[1:], rows
                    )

                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": table},
                        updatevalues={"rowid": next_chunk},
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update(table, postgres_size)
            else:
                return

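# Note (editorial, not part of the original script): handle_table copies rows
# in batches using keyset pagination on rowid rather than OFFSET, so each
# chunk costs O(batch) regardless of how far the port has progressed, and the
# last ported rowid is checkpointed in port_from_sqlite3 for resumption. A
# generic sketch of the same idea (table and helper names are illustrative):
#
#     next_chunk = 1
#     while True:
#         rows = src.execute(
#             "SELECT rowid, * FROM some_table WHERE rowid >= ?"
#             " ORDER BY rowid LIMIT ?", (next_chunk, batch_size)
#         ).fetchall()
#         if not rows:
#             break
#         dst.insert_many(rows)
#         next_chunk = rows[-1][0] + 1   # checkpoint after each batch
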
    @defer.inlineCallbacks
    def handle_search_table(self, postgres_size, table_size, next_chunk):
        select = (
            "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
            " FROM event_search as es"
            " INNER JOIN events AS e USING (event_id, room_id)"
            " WHERE es.rowid >= ?"
            " ORDER BY es.rowid LIMIT ?"
        )

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                return headers, rows

            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1

                # We have to treat event_search differently since it has a
                # different structure in the two different databases.
                def insert(txn):
                    sql = (
                        "INSERT INTO event_search (event_id, room_id, key,"
                        " sender, vector, origin_server_ts, stream_ordering)"
                        " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
                    )

                    rows_dict = [
                        dict(zip(headers, row))
                        for row in rows
                    ]

                    txn.executemany(sql, [
                        (
                            row["event_id"],
                            row["room_id"],
                            row["key"],
                            row["sender"],
                            row["value"],
                            row["origin_server_ts"],
                            row["stream_ordering"],
                        )
                        for row in rows_dict
                    ])

                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": "event_search"},
                        updatevalues={"rowid": next_chunk},
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update("event_search", postgres_size)

            else:
                return

    def setup_db(self, db_config, database_engine):
        db_conn = database_engine.module.connect(
            **{
                k: v for k, v in db_config.get("args", {}).items()
                if not k.startswith("cp_")
            }
        )

        prepare_database(db_conn, database_engine, config=None)

        db_conn.commit()

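# Note (editorial, not part of the original script): setup_db strips the
# "cp_*" keys before connecting because those are twisted.enterprise.adbapi
# ConnectionPool options (e.g. cp_min, cp_max), not arguments the underlying
# DB-API driver understands. Illustrative example of the filtering:
#
#     args = {"database": "homeserver.db", "cp_min": 1, "cp_max": 1}
#     driver_args = {k: v for k, v in args.items() if not k.startswith("cp_")}
#     # driver_args == {"database": "homeserver.db"}
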
    @defer.inlineCallbacks
    def run(self):
        try:
            sqlite_db_pool = adbapi.ConnectionPool(
                self.sqlite_config["name"],
                **self.sqlite_config["args"]
            )

            postgres_db_pool = adbapi.ConnectionPool(
                self.postgres_config["name"],
                **self.postgres_config["args"]
            )

            sqlite_engine = create_engine(sqlite_config)
            postgres_engine = create_engine(postgres_config)

            self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
            self.postgres_store = Store(postgres_db_pool, postgres_engine)

            yield self.postgres_store.execute(
                postgres_engine.check_database
            )

            # Step 1. Set up databases.
            self.progress.set_state("Preparing SQLite3")
            self.setup_db(sqlite_config, sqlite_engine)

            self.progress.set_state("Preparing PostgreSQL")
            self.setup_db(postgres_config, postgres_engine)

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
                table="sqlite_master",
                keyvalues={
                    "type": "table",
                },
                retcol="name",
            )

            postgres_tables = yield self.postgres_store._simple_select_onecol(
                table="information_schema.tables",
                keyvalues={
                    "table_schema": "public",
                },
                retcol="distinct table_name",
            )

            tables = set(sqlite_tables) & set(postgres_tables)

            self.progress.set_state("Creating tables")

            logger.info("Found %d tables", len(tables))

            def create_port_table(txn):
                txn.execute(
                    "CREATE TABLE port_from_sqlite3 ("
                    " table_name varchar(100) NOT NULL UNIQUE,"
                    " rowid bigint NOT NULL"
                    ")"
                )

            try:
                yield self.postgres_store.runInteraction(
                    "create_port_table", create_port_table
                )
            except Exception as e:
                logger.info("Failed to create port table: %s", e)

            self.progress.set_state("Setting up")

            # Set up tables.
            setup_res = yield defer.gatherResults(
                [
                    self.setup_table(table)
                    for table in tables
                    if table not in ["schema_version", "applied_schema_deltas"]
                    and not table.startswith("sqlite_")
                ],
                consumeErrors=True,
            )

            # Process tables.
            yield defer.gatherResults(
                [
                    self.handle_table(*res)
                    for res in setup_res
                ],
                consumeErrors=True,
            )

            self.progress.done()
        except:
            global end_error_exec_info
            end_error_exec_info = sys.exc_info()
            logger.exception("")
        finally:
            reactor.stop()

    def _convert_rows(self, table, headers, rows):
        bool_col_names = BOOLEAN_COLUMNS.get(table, [])

        bool_cols = [
            i for i, h in enumerate(headers) if h in bool_col_names
        ]

        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            return col

        for i, row in enumerate(rows):
            rows[i] = tuple(
                conv(j, col)
                for j, col in enumerate(row)
                if j > 0
            )

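# Note (editorial, not part of the original script): _convert_rows exists
# because SQLite stores booleans as 0/1 integers while the PostgreSQL schema
# uses real boolean columns; it also drops column 0 (the SQLite rowid), which
# is tracked separately as the port checkpoint. For example, assuming
# BOOLEAN_COLUMNS = {"events": ["processed"]} and headers
# ["rowid", "event_id", "processed"]:
#
#     rows = [(5, "$ev1", 1)]
#     # after _convert_rows("events", headers, rows):
#     # rows == [("$ev1", True)]
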
    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time()*1000) - 86400000

        # And save the max transaction id from each destination
        select = (
            "SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
            "SELECT max(rowid) FROM sent_transactions"
            " GROUP BY destination"
            ")"
        )

        def r(txn):
            txn.execute(select)
            rows = txn.fetchall()
            headers = [column[0] for column in txn.description]

            ts_ind = headers.index('ts')

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = yield self.sqlite_store.runInteraction(
            "select", r,
        )

        self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        if inserted_rows:
            max_inserted_rowid = max(r[0] for r in rows)

            def insert(txn):
                self.postgres_store.insert_many_txn(
                    txn, "sent_transactions", headers[1:], rows
                )

            yield self.postgres_store.execute(insert)
        else:
            max_inserted_rowid = 0

        def get_start_id(txn):
            txn.execute(
                "SELECT rowid FROM sent_transactions WHERE ts >= ?"
                " ORDER BY rowid ASC LIMIT 1",
                (yesterday,)
            )

            rows = txn.fetchall()
            if rows:
                return rows[0][0]
            else:
                return 1

        next_chunk = yield self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={"table_name": "sent_transactions", "rowid": next_chunk}
        )

        def get_sent_table_size(txn):
            txn.execute(
                "SELECT count(*) FROM sent_transactions"
                " WHERE ts >= ?",
                (yesterday,)
            )
            size, = txn.fetchone()
            return int(size)

        remaining_count = yield self.sqlite_store.execute(
            get_sent_table_size
        )

        total_count = remaining_count + inserted_rows

        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            next_chunk,
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
        rows = yield self.postgres_store.execute_sql(
            "SELECT count(*) FROM %s" % (table,),
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, next_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
        )

        remaining = int(remaining) if remaining else 0
        done = int(done) if done else 0

        defer.returnValue((done, remaining + done))


##############################################
###### The following is simply UI stuff ######
##############################################


class Progress(object):
    """Used to report progress of the port
    """
    def __init__(self):
        self.tables = {}

        self.start_time = int(time.time())

    def add_table(self, table, cur, size):
        self.tables[table] = {
            "start": cur,
            "num_done": cur,
            "total": size,
            "perc": int(cur * 100 / size),
        }

    def update(self, table, num_done):
        data = self.tables[table]
        data["num_done"] = num_done
        data["perc"] = int(num_done * 100 / data["total"])

    def done(self):
        pass


class CursesProgress(Progress):
    """Reports progress to a curses window
    """
    def __init__(self, stdscr):
        self.stdscr = stdscr

        curses.use_default_colors()
        curses.curs_set(0)

        curses.init_pair(1, curses.COLOR_RED, -1)
        curses.init_pair(2, curses.COLOR_GREEN, -1)

        self.last_update = 0

        self.finished = False

        self.total_processed = 0
        self.total_remaining = 0

        super(CursesProgress, self).__init__()

    def update(self, table, num_done):
        super(CursesProgress, self).update(table, num_done)

        self.total_processed = 0
        self.total_remaining = 0
        for table, data in self.tables.items():
            self.total_processed += data["num_done"] - data["start"]
            self.total_remaining += data["total"] - data["num_done"]

        self.render()

    def render(self, force=False):
        now = time.time()

        if not force and now - self.last_update < 0.2:
            # reactor.callLater(1, self.render)
            return

        self.stdscr.clear()

        rows, cols = self.stdscr.getmaxyx()

        duration = int(now) - int(self.start_time)

        minutes, seconds = divmod(duration, 60)
        duration_str = '%02dm %02ds' % (minutes, seconds,)

        if self.finished:
            status = "Time spent: %s (Done!)" % (duration_str,)
        else:

            if self.total_processed > 0:
                left = float(self.total_remaining) / self.total_processed

                est_remaining = (int(now) - self.start_time) * left
                est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
            else:
                est_remaining_str = "Unknown"
            status = (
                "Time spent: %s (est. remaining: %s)"
                % (duration_str, est_remaining_str,)
            )

        self.stdscr.addstr(
            0, 0,
            status,
            curses.A_BOLD,
        )

        max_len = max([len(t) for t in self.tables.keys()])

        left_margin = 5
        middle_space = 1

        items = self.tables.items()
        items.sort(
            key=lambda i: (i[1]["perc"], i[0]),
        )

        for i, (table, data) in enumerate(items):
            if i + 2 >= rows:
                break

            perc = data["perc"]

            color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)

            self.stdscr.addstr(
                i+2, left_margin + max_len - len(table),
                table,
                curses.A_BOLD | color,
            )

            size = 20

            progress = "[%s%s]" % (
                "#" * int(perc*size/100),
                " " * (size - int(perc*size/100)),
            )

            self.stdscr.addstr(
                i+2, left_margin + max_len + middle_space,
                "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
            )

        if self.finished:
            self.stdscr.addstr(
                rows-1, 0,
                "Press any key to exit...",
            )

        self.stdscr.refresh()
        self.last_update = time.time()

    def done(self):
        self.finished = True
        self.render(True)
        self.stdscr.getch()

    def set_state(self, state):
        self.stdscr.clear()
        self.stdscr.addstr(
            0, 0,
            state + "...",
            curses.A_BOLD,
        )
        self.stdscr.refresh()


class TerminalProgress(Progress):
    """Just prints progress to the terminal
    """
    def update(self, table, num_done):
        super(TerminalProgress, self).update(table, num_done)

        data = self.tables[table]

        print "%s: %d%% (%d/%d)" % (
            table, data["perc"],
            data["num_done"], data["total"],
        )

    def set_state(self, state):
        print state + "..."


##############################################
##############################################


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="A script to port an existing synapse SQLite database to"
                    " a new PostgreSQL database."
    )
    parser.add_argument("-v", action='store_true')
    parser.add_argument(
        "--sqlite-database", required=True,
        help="The snapshot of the SQLite database file. This must not be"
             " currently used by a running synapse server"
    )
    parser.add_argument(
        "--postgres-config", type=argparse.FileType('r'), required=True,
        help="The database config file for the PostgreSQL database"
    )
    parser.add_argument(
        "--curses", action='store_true',
        help="display a curses based progress UI"
    )

    parser.add_argument(
        "--batch-size", type=int, default=1000,
        help="The number of rows to select from the SQLite table each"
             " iteration [default=1000]",
    )

    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }

    if args.curses:
        logging_config["filename"] = "port-synapse.log"

    logging.basicConfig(**logging_config)

    sqlite_config = {
        "name": "sqlite3",
        "args": {
            "database": args.sqlite_database,
            "cp_min": 1,
            "cp_max": 1,
            "check_same_thread": False,
        },
    }

    postgres_config = yaml.safe_load(args.postgres_config)

    if "database" in postgres_config:
        postgres_config = postgres_config["database"]

    if "name" not in postgres_config:
        sys.stderr.write("Malformed database config: no 'name'")
        sys.exit(2)
    if postgres_config["name"] != "psycopg2":
        sys.stderr.write("Database must use 'psycopg2' connector.")
        sys.exit(3)

    def start(stdscr=None):
        if stdscr:
            progress = CursesProgress(stdscr)
        else:
            progress = TerminalProgress()

        porter = Porter(
            sqlite_config=sqlite_config,
            postgres_config=postgres_config,
            progress=progress,
            batch_size=args.batch_size,
        )

        reactor.callWhenRunning(porter.run)

        reactor.run()

    if args.curses:
        curses.wrapper(start)
    else:
        start()

    if end_error_exec_info:
        exc_type, exc_value, exc_traceback = end_error_exec_info
        traceback.print_exception(exc_type, exc_value, exc_traceback)
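# Note (editorial, not part of the original script): a typical invocation of
# this port script, assuming a snapshot copy of homeserver.db and a YAML file
# describing the PostgreSQL target (file names below are illustrative):
#
#     python scripts/synapse_port_db --sqlite-database homeserver.db.snapshot \
#         --postgres-config database_config.yaml --curses
#
# where database_config.yaml might contain (the "name" must be psycopg2, as
# enforced above):
#
#     database:
#       name: psycopg2
#       args:
#         user: synapse
#         password: secret
#         database: synapse
#         host: localhost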
331
scripts/upgrade_db_to_v0.6.0.py
Normal file
@@ -0,0 +1,331 @@
from synapse.storage import SCHEMA_VERSION, read_schema
from synapse.storage._base import SQLBaseStore
from synapse.storage.signatures import SignatureStore
from synapse.storage.event_federation import EventFederationStore

from syutil.base64util import encode_base64, decode_base64

from synapse.crypto.event_signing import compute_event_signature

from synapse.events.builder import EventBuilder
from synapse.events.utils import prune_event

from synapse.crypto.event_signing import check_event_content_hash

from syutil.crypto.jsonsign import (
    verify_signed_json, SignatureVerifyException,
)
from syutil.crypto.signing_key import decode_verify_key_bytes

from syutil.jsonutil import encode_canonical_json

import argparse
# import dns.resolver
import hashlib
import httplib
import json
import sqlite3
import syutil
import urllib2


delta_sql = """
CREATE TABLE IF NOT EXISTS event_json(
    event_id TEXT NOT NULL,
    room_id TEXT NOT NULL,
    internal_metadata NOT NULL,
    json BLOB NOT NULL,
    CONSTRAINT ev_j_uniq UNIQUE (event_id)
);

CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);

PRAGMA user_version = 10;
"""


class Store(object):
    _get_event_signatures_txn = SignatureStore.__dict__["_get_event_signatures_txn"]
    _get_event_content_hashes_txn = SignatureStore.__dict__["_get_event_content_hashes_txn"]
    _get_event_reference_hashes_txn = SignatureStore.__dict__["_get_event_reference_hashes_txn"]
    _get_prev_event_hashes_txn = SignatureStore.__dict__["_get_prev_event_hashes_txn"]
    _get_prev_events_and_state = EventFederationStore.__dict__["_get_prev_events_and_state"]
    _get_auth_events = EventFederationStore.__dict__["_get_auth_events"]
    cursor_to_dict = SQLBaseStore.__dict__["cursor_to_dict"]
    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_list_txn = SQLBaseStore.__dict__["_simple_select_list_txn"]
    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]

    def _generate_event_json(self, txn, rows):
        events = []
        for row in rows:
            d = dict(row)

            d.pop("stream_ordering", None)
            d.pop("topological_ordering", None)
            d.pop("processed", None)

            if "origin_server_ts" not in d:
                d["origin_server_ts"] = d.pop("ts", 0)
            else:
                d.pop("ts", 0)

            d.pop("prev_state", None)
            d.update(json.loads(d.pop("unrecognized_keys")))

            d["sender"] = d.pop("user_id")

            d["content"] = json.loads(d["content"])

            if "age_ts" not in d:
                # For compatibility
                d["age_ts"] = d.get("origin_server_ts", 0)

            d.setdefault("unsigned", {})["age_ts"] = d.pop("age_ts")

            outlier = d.pop("outlier", False)

            # d.pop("membership", None)

            d.pop("state_hash", None)

            d.pop("replaces_state", None)

            b = EventBuilder(d)
            b.internal_metadata.outlier = outlier

            events.append(b)

        for i, ev in enumerate(events):
            signatures = self._get_event_signatures_txn(
                txn, ev.event_id,
            )

            ev.signatures = {
                n: {
                    k: encode_base64(v) for k, v in s.items()
                }
                for n, s in signatures.items()
            }

            hashes = self._get_event_content_hashes_txn(
                txn, ev.event_id,
            )

            ev.hashes = {
                k: encode_base64(v) for k, v in hashes.items()
            }

            prevs = self._get_prev_events_and_state(txn, ev.event_id)

            ev.prev_events = [
                (e_id, h)
                for e_id, h, is_state in prevs
                if is_state == 0
            ]

            # ev.auth_events = self._get_auth_events(txn, ev.event_id)

            hashes = dict(ev.auth_events)

            for e_id, hash in ev.prev_events:
                if e_id in hashes and not hash:
                    hash.update(hashes[e_id])
            #
            # if hasattr(ev, "state_key"):
            #     ev.prev_state = [
            #         (e_id, h)
            #         for e_id, h, is_state in prevs
            #         if is_state == 1
            #     ]

        return [e.build() for e in events]


store = Store()


# def get_key(server_name):
#     print "Getting keys for: %s" % (server_name,)
#     targets = []
#     if ":" in server_name:
#         target, port = server_name.split(":")
#         targets.append((target, int(port)))
#     try:
#         answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
#         for srv in answers:
#             targets.append((srv.target, srv.port))
#     except dns.resolver.NXDOMAIN:
#         targets.append((server_name, 8448))
#     except:
#         print "Failed to lookup keys for %s" % (server_name,)
#         return {}
#
#     for target, port in targets:
#         url = "https://%s:%i/_matrix/key/v1" % (target, port)
#         try:
#             keys = json.load(urllib2.urlopen(url, timeout=2))
#             verify_keys = {}
#             for key_id, key_base64 in keys["verify_keys"].items():
#                 verify_key = decode_verify_key_bytes(
#                     key_id, decode_base64(key_base64)
#                 )
#                 verify_signed_json(keys, server_name, verify_key)
#                 verify_keys[key_id] = verify_key
#             print "Got keys for: %s" % (server_name,)
#             return verify_keys
#         except urllib2.URLError:
#             pass
#         except urllib2.HTTPError:
#             pass
#         except httplib.HTTPException:
#             pass
#
#     print "Failed to get keys for %s" % (server_name,)
#     return {}


def reinsert_events(cursor, server_name, signing_key):
    print "Running delta: v10"

    cursor.executescript(delta_sql)

    cursor.execute(
        "SELECT * FROM events ORDER BY rowid ASC"
    )

    print "Getting events..."

    rows = store.cursor_to_dict(cursor)

    events = store._generate_event_json(cursor, rows)

    print "Got events from DB."

    algorithms = {
        "sha256": hashlib.sha256,
    }

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    verify_key = signing_key.verify_key
    verify_key.alg = signing_key.alg
    verify_key.version = signing_key.version

    server_keys = {
        server_name: {
            key_id: verify_key
        }
    }

    i = 0
    N = len(events)

    for event in events:
        if i % 100 == 0:
            print "Processed: %d/%d events" % (i,N,)
        i += 1

        # for alg_name in event.hashes:
        #     if check_event_content_hash(event, algorithms[alg_name]):
        #         pass
        #     else:
        #         pass
        #         print "FAIL content hash %s %s" % (alg_name, event.event_id, )

        have_own_correctly_signed = False
        for host, sigs in event.signatures.items():
            pruned = prune_event(event)

            for key_id in sigs:
                if host not in server_keys:
                    server_keys[host] = {}  # get_key(host)
                if key_id in server_keys[host]:
                    try:
                        verify_signed_json(
                            pruned.get_pdu_json(),
                            host,
                            server_keys[host][key_id]
                        )

                        if host == server_name:
                            have_own_correctly_signed = True
                    except SignatureVerifyException:
                        print "FAIL signature check %s %s" % (
                            key_id, event.event_id
                        )

        # TODO: Re sign with our own server key
        if not have_own_correctly_signed:
            sigs = compute_event_signature(event, server_name, signing_key)
            event.signatures.update(sigs)

            pruned = prune_event(event)

            for key_id in event.signatures[server_name]:
                verify_signed_json(
                    pruned.get_pdu_json(),
                    server_name,
                    server_keys[server_name][key_id]
                )

        event_json = encode_canonical_json(
            event.get_dict()
        ).decode("UTF-8")

        metadata_json = encode_canonical_json(
            event.internal_metadata.get_dict()
        ).decode("UTF-8")

        store._simple_insert_txn(
            cursor,
            table="event_json",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "internal_metadata": metadata_json,
                "json": event_json,
            },
            or_replace=True,
        )


def main(database, server_name, signing_key):
    conn = sqlite3.connect(database)
    cursor = conn.cursor()

    # Do other deltas:
    cursor.execute("PRAGMA user_version")
    row = cursor.fetchone()

    if row and row[0]:
        user_version = row[0]
        # Run every version since after the current version.
        for v in range(user_version + 1, 10):
            print "Running delta: %d" % (v,)
            sql_script = read_schema("delta/v%d" % (v,))
            cursor.executescript(sql_script)

    reinsert_events(cursor, server_name, signing_key)

    conn.commit()

    print "Success!"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("database")
    parser.add_argument("server_name")
    parser.add_argument(
        "signing_key", type=argparse.FileType('r'),
    )
    args = parser.parse_args()

    signing_key = syutil.crypto.signing_key.read_signing_keys(
        args.signing_key
    )

    main(args.database, args.server_name, signing_key[0])
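# Note (editorial, not part of the original script): a typical invocation,
# assuming a homeserver SQLite database and the server's signing key file
# (paths are illustrative):
#
#     python scripts/upgrade_db_to_v0.6.0.py homeserver.db example.com \
#         example.com.signing.key
#
# The script replays schema deltas up to v10, then re-inserts every event
# into the new event_json table, re-signing any event that lacks a valid
# signature from the local server.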
@@ -3,6 +3,9 @@ source-dir = docs/sphinx
build-dir = docs/build
all_files = 1

[aliases]
test = trial

[trial]
test_suite = tests

@@ -13,8 +16,3 @@ ignore =
    docs/*
    pylint.cfg
    tox.ini

[flake8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503

51
setup.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,10 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
from setuptools import setup, find_packages, Command
import sys
from setuptools import setup, find_packages


here = os.path.abspath(os.path.dirname(__file__))
@@ -38,39 +36,6 @@ def exec_file(path_segments):
    exec(code, result)
    return result


class Tox(Command):
    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]

    def initialize_options(self):
        self.tox_args = None

    def finalize_options(self):
        self.test_args = []
        self.test_suite = True

    def run(self):
        #import here, cause outside the eggs aren't loaded
        try:
            import tox
        except ImportError:
            try:
                self.distribution.fetch_build_eggs("tox")
                import tox
            except:
                raise RuntimeError(
                    "The tests need 'tox' to run. Please install 'tox'."
                )
        import shlex
        args = self.tox_args
        if args:
            args = shlex.split(self.tox_args)
        else:
            args = []
        errno = tox.cmdline(args=args)
        sys.exit(errno)


version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))
@@ -80,11 +45,15 @@ setup(
    version=version,
    packages=find_packages(exclude=["tests", "tests.*"]),
    description="Reference Synapse Home Server",
    install_requires=dependencies['requirements'](include_conditional=True).keys(),
    dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
    install_requires=dependencies["REQUIREMENTS"].keys(),
    setup_requires=[
        "Twisted==14.0.2", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
        "setuptools_trial",
        "mock"
    ],
    dependency_links=dependencies["DEPENDENCY_LINKS"],
    include_package_data=True,
    zip_safe=False,
    long_description=long_description,
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={'test': Tox},
    scripts=["synctl"],
)

@@ -37,13 +37,9 @@ textarea, input {
    margin: auto
}

.g-recaptcha div {
    margin: auto;
}

#registrationForm {
    text-align: left;
    padding: 5px;
    padding: 1em;
    margin-bottom: 40px;
    display: inline-block;

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.17.0-rc2"
__version__ = "0.8.0"

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,11 +27,22 @@ class Membership(object):
    LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)


class Feedback(object):

    """Represents the types of feedback a user can send in response to a
    message."""

    DELIVERED = u"delivered"
    READ = u"read"
    LIST = (DELIVERED, READ)


class PresenceState(object):
    """Represents the presence state of a user."""
    OFFLINE = u"offline"
    UNAVAILABLE = u"unavailable"
    ONLINE = u"online"
    FREE_FOR_CHAT = u"free_for_chat"


class JoinRules(object):
@@ -48,11 +59,7 @@ class LoginType(object):
    EMAIL_URL = u"m.login.email.url"
    EMAIL_IDENTITY = u"m.login.email.identity"
    RECAPTCHA = u"m.login.recaptcha"
    DUMMY = u"m.login.dummy"

    # Only for C/S API v1
    APPLICATION_SERVICE = u"m.login.application_service"
    SHARED_SECRET = u"org.matrix.login.shared_secret"


class EventTypes(object):
@@ -62,12 +69,7 @@ class EventTypes(object):
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"
    Feedback = "m.room.message.feedback"

    # These are used for validation
    Message = "m.room.message"
@@ -79,9 +81,3 @@ class RejectedReason(object):
    AUTH_ERROR = "auth_error"
    REPLACED = "replaced"
    NOT_ANCESTOR = "not_ancestor"


class RoomCreationPreset(object):
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,29 +29,22 @@ class Codes(object):
    USER_IN_USE = "M_USER_IN_USE"
    ROOM_IN_USE = "M_ROOM_IN_USE"
    BAD_PAGINATION = "M_BAD_PAGINATION"
    BAD_STATE = "M_BAD_STATE"
    UNKNOWN = "M_UNKNOWN"
    NOT_FOUND = "M_NOT_FOUND"
    MISSING_TOKEN = "M_MISSING_TOKEN"
    UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
    GUEST_ACCESS_FORBIDDEN = "M_GUEST_ACCESS_FORBIDDEN"
    LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
    CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
    CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
    MISSING_PARAM = "M_MISSING_PARAM"
    TOO_LARGE = "M_TOO_LARGE"
    MISSING_PARAM = "M_MISSING_PARAM",
    TOO_LARGE = "M_TOO_LARGE",
    EXCLUSIVE = "M_EXCLUSIVE"
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes."""

    def __init__(self, code, msg):
        logger.info("%s: %s, %s", type(self).__name__, code, msg)
        super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
        self.code = code
        self.msg = msg
@@ -81,6 +74,11 @@ class SynapseError(CodeMessageException):
    )


class RoomError(SynapseError):
    """An error raised when a room event fails."""
    pass


class RegistrationError(SynapseError):
    """An error raised when a registration event fails."""
    pass
@@ -124,15 +122,6 @@ class AuthError(SynapseError):
        super(AuthError, self).__init__(*args, **kwargs)


class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.TOO_LARGE
        super(EventSizeError, self).__init__(413, *args, **kwargs)


class EventStreamError(SynapseError):
    """An error raised when there a problem with the event stream."""
    def __init__(self, *args, **kwargs):

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,10 +15,6 @@
from synapse.api.errors import SynapseError
from synapse.types import UserID, RoomID

from twisted.internet import defer

import ujson as json


class Filtering(object):

@@ -26,20 +22,20 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = yield self.store.get_user_filter(user_localpart, filter_id)
        defer.returnValue(FilterCollection(result))
        result = self.store.get_user_filter(user_localpart, filter_id)
        result.addCallback(Filter)
        return result

    def add_user_filter(self, user_localpart, user_filter):
        self.check_valid_filter(user_filter)
        self._check_valid_filter(user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)

    # TODO(paul): surely we should probably add a delete_user_filter or
    #   replace_user_filter at some point? There's no REST API specified for
    #   them however

    def check_valid_filter(self, user_filter_json):
    def _check_valid_filter(self, user_filter_json):
        """Check if the provided filter is valid.

        This inspects all definitions contained within the filter.
@@ -54,11 +50,11 @@ class Filtering(object):
        #   many definitions.

        top_level_definitions = [
            "presence", "account_data"
            "public_user_data", "private_user_data", "server_data"
        ]

        room_level_definitions = [
            "state", "timeline", "ephemeral", "account_data"
            "state", "events", "ephemeral"
        ]

        for key in top_level_definitions:
@@ -66,29 +62,10 @@ class Filtering(object):
                self._check_definition(user_filter_json[key])

        if "room" in user_filter_json:
            self._check_definition_room_lists(user_filter_json["room"])
            for key in room_level_definitions:
                if key in user_filter_json["room"]:
                    self._check_definition(user_filter_json["room"][key])

    def _check_definition_room_lists(self, definition):
        """Check that "rooms" and "not_rooms" are lists of room ids if they
        are present

        Args:
            definition(dict): The filter definition
        Raises:
            SynapseError: If there was a problem with this definition.
        """
        # check rooms are valid room IDs
        room_id_keys = ["rooms", "not_rooms"]
        for key in room_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for room_id in definition[key]:
                    RoomID.from_string(room_id)

    def _check_definition(self, definition):
        """Check if the provided definition is valid.

@@ -108,7 +85,14 @@ class Filtering(object):
                400, "Expected JSON object, not %s" % (definition,)
            )

        self._check_definition_room_lists(definition)
        # check rooms are valid room IDs
        room_id_keys = ["rooms", "not_rooms"]
        for key in room_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for room_id in definition[key]:
                    RoomID.from_string(room_id)

        # check senders are valid user IDs
        user_id_keys = ["senders", "not_senders"]
@@ -130,162 +114,116 @@ class Filtering(object):
            if not isinstance(event_type, basestring):
                raise SynapseError(400, "Event type should be a string")

        if "format" in definition:
            event_format = definition["format"]
            if event_format not in ["federation", "events"]:
                raise SynapseError(400, "Invalid format: %s" % (event_format,))

class FilterCollection(object):
    def __init__(self, filter_json):
        self._filter_json = filter_json
        if "select" in definition:
            event_select_list = definition["select"]
            for select_key in event_select_list:
                if select_key not in ["event_id", "origin_server_ts",
                                      "thread_id", "content", "content.body"]:
                    raise SynapseError(400, "Bad select: %s" % (select_key,))

        room_filter_json = self._filter_json.get("room", {})

        self._room_filter = Filter({
            k: v for k, v in room_filter_json.items()
            if k in ("rooms", "not_rooms")
        })

        self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
        self._room_state_filter = Filter(room_filter_json.get("state", {}))
        self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
        self._room_account_data = Filter(room_filter_json.get("account_data", {}))
        self._presence_filter = Filter(filter_json.get("presence", {}))
        self._account_data = Filter(filter_json.get("account_data", {}))

        self.include_leave = filter_json.get("room", {}).get(
            "include_leave", False
        )

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)

    def get_filter_json(self):
        return self._filter_json

    def timeline_limit(self):
        return self._room_timeline_filter.limit()

    def presence_limit(self):
        return self._presence_filter.limit()

    def ephemeral_limit(self):
        return self._room_ephemeral_filter.limit()

    def filter_presence(self, events):
        return self._presence_filter.filter(events)

    def filter_account_data(self, events):
        return self._account_data.filter(events)

    def filter_room_state(self, events):
        return self._room_state_filter.filter(self._room_filter.filter(events))

    def filter_room_timeline(self, events):
        return self._room_timeline_filter.filter(self._room_filter.filter(events))

    def filter_room_ephemeral(self, events):
        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))

    def filter_room_account_data(self, events):
        return self._room_account_data.filter(self._room_filter.filter(events))
        if ("bundle_updates" in definition and
                type(definition["bundle_updates"]) != bool):
            raise SynapseError(400, "Bad bundle_updates: expected bool.")


class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
        self.not_types = self.filter_json.get("not_types", [])
    def filter_public_user_data(self, events):
        return self._filter_on_key(events, ["public_user_data"])

        self.rooms = self.filter_json.get("rooms", None)
        self.not_rooms = self.filter_json.get("not_rooms", [])
    def filter_private_user_data(self, events):
        return self._filter_on_key(events, ["private_user_data"])

        self.senders = self.filter_json.get("senders", None)
        self.not_senders = self.filter_json.get("not_senders", [])
    def filter_room_state(self, events):
        return self._filter_on_key(events, ["room", "state"])

        self.contains_url = self.filter_json.get("contains_url", None)
    def filter_room_events(self, events):
        return self._filter_on_key(events, ["room", "events"])

    def check(self, event):
        """Checks whether the filter matches the given event.
    def filter_room_ephemeral(self, events):
        return self._filter_on_key(events, ["room", "ephemeral"])

    def _filter_on_key(self, events, keys):
        filter_json = self.filter_json
        if not filter_json:
            return events

        try:
            # extract the right definition from the filter
            definition = filter_json
            for key in keys:
                definition = definition[key]
            return self._filter_with_definition(events, definition)
        except KeyError:
            # return all events if definition isn't specified.
            return events

    def _filter_with_definition(self, events, definition):
        return [e for e in events if self._passes_definition(definition, e)]

    def _passes_definition(self, definition, event):
        """Check if the event passes through the given definition.

        Args:
            definition(dict): The definition to check against.
            event(Event): The event to check.
        Returns:
            bool: True if the event matches
            True if the event passes through the filter.
        """
        sender = event.get("sender", None)
        if not sender:
            # Presence events have their 'sender' in content.user_id
            content = event.get("content")
            # account_data has been allowed to have non-dict content, so check type first
            if isinstance(content, dict):
                sender = content.get("user_id")
        # Algorithm notes:
        # For each key in the definition, check the event meets the criteria:
        #   * For types: Literal match or prefix match (if ends with wildcard)
        #   * For senders/rooms: Literal match only
        #   * "not_" checks take presedence (e.g. if "m.*" is in both 'types'
        #     and 'not_types' then it is treated as only being in 'not_types')

        return self.check_fields(
            event.get("room_id", None),
            sender,
            event.get("type", None),
            "url" in event.get("content", {})
        )

    def check_fields(self, room_id, sender, event_type, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
            bool: True if the event fields match
        """
        literal_keys = {
            "rooms": lambda v: room_id == v,
            "senders": lambda v: sender == v,
            "types": lambda v: _matches_wildcard(event_type, v)
        }

        for name, match_func in literal_keys.items():
            not_name = "not_%s" % (name,)
            disallowed_values = getattr(self, not_name)
            if any(map(match_func, disallowed_values)):
        # room checks
        if hasattr(event, "room_id"):
            room_id = event.room_id
            allow_rooms = definition.get("rooms", None)
            reject_rooms = definition.get("not_rooms", None)
            if reject_rooms and room_id in reject_rooms:
                return False
            if allow_rooms and room_id not in allow_rooms:
                return False

            allowed_values = getattr(self, name)
            if allowed_values is not None:
                if not any(map(match_func, allowed_values)):
                    return False
        # sender checks
        if hasattr(event, "sender"):
            # Should we be including event.state_key for some event types?
            sender = event.sender
            allow_senders = definition.get("senders", None)
            reject_senders = definition.get("not_senders", None)
            if reject_senders and sender in reject_senders:
                return False
            if allow_senders and sender not in allow_senders:
                return False

        contains_url_filter = self.filter_json.get("contains_url")
        if contains_url_filter is not None:
            if contains_url_filter != contains_url:
        # type checks
        if "not_types" in definition:
            for def_type in definition["not_types"]:
                if self._event_matches_type(event, def_type):
                    return False
        if "types" in definition:
            included = False
            for def_type in definition["types"]:
                if self._event_matches_type(event, def_type):
                    included = True
                    break
            if not included:
                return False

        return True

    def filter_rooms(self, room_ids):
        """Apply the 'rooms' filter to a given list of rooms.

        Args:
            room_ids (list): A list of room_ids.

        Returns:
            list: A list of room_ids that match the filter
        """
        room_ids = set(room_ids)

        disallowed_rooms = set(self.filter_json.get("not_rooms", []))
        room_ids -= disallowed_rooms

        allowed_rooms = self.filter_json.get("rooms", None)
        if allowed_rooms is not None:
            room_ids &= set(allowed_rooms)

        return room_ids

    def filter(self, events):
        return filter(self.check, events)

    def limit(self):
        return self.filter_json.get("limit", 10)


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):
        type_prefix = filter_value[:-1]
        return actual_value.startswith(type_prefix)
    else:
        return actual_value == filter_value


DEFAULT_FILTER_COLLECTION = FilterCollection({})
    def _event_matches_type(self, event, def_type):
        if def_type.endswith("*"):
            type_prefix = def_type[:-1]
            return event.type.startswith(type_prefix)
        else:
            return event.type == def_type

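# Note (editorial, not part of the diff): the old _event_matches_type and the
# newer _matches_wildcard above implement the same rule: a trailing "*" in a
# filter value makes it a prefix match, otherwise it is a literal match.
# For example:
#
#     _matches_wildcard("m.room.message", "m.room.*")        # True (prefix)
#     _matches_wildcard("m.room.message", "m.room.message")  # True (literal)
#     _matches_wildcard("m.presence", "m.room.*")            # False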
@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,7 +22,5 @@ STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,20 +12,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
sys.dont_write_bytecode = True

from synapse import python_dependencies  # noqa: E402

try:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (e.message,),
        "To install run:",
        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
        "",
    ])
    sys.stderr.writelines(message)
    sys.exit(1)

@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,308 +14,334 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
import gc
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from synapse.config._base import ConfigError
|
||||
sys.dont_write_bytecode = True
|
||||
|
||||
from synapse.python_dependencies import (
|
||||
check_requirements, DEPENDENCY_LINKS
|
||||
from synapse.storage import (
|
||||
prepare_database, prepare_sqlite3_database, UpgradeDatabaseException,
|
||||
)
|
||||
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
|
||||
from synapse.storage import are_all_users_on_domain
|
||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
||||
|
||||
from synapse.server import HomeServer
|
||||
|
||||
from twisted.internet import reactor, task, defer
|
||||
from twisted.application import service
|
||||
from twisted.web.resource import Resource, EncodingResourceWrapper
|
||||
from synapse.python_dependencies import check_requirements
|
||||
|
||||
from twisted.internet import reactor
|
||||
from twisted.enterprise import adbapi
|
||||
from twisted.web.resource import Resource
|
||||
from twisted.web.static import File
|
||||
from twisted.web.server import GzipEncoderFactory
|
||||
from synapse.http.server import RootRedirect
|
||||
from twisted.web.server import Site
|
||||
from synapse.http.server import JsonResource, RootRedirect
|
||||
from synapse.rest.appservice.v1 import AppServiceRestResource
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||
from synapse.rest.key.v1.server_key_resource import LocalKey
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.http.server_key_resource import LocalKey
|
||||
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
|
||||
from synapse.api.urls import (
|
||||
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
|
||||
SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
|
||||
SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, APP_SERVICE_PREFIX,
|
||||
STATIC_PREFIX
|
||||
)
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.metrics import register_memory_metrics
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
|
||||
from synapse.http.site import SynapseSite
|
||||
|
||||
from synapse import events
|
||||
from synapse.rest.client.v1 import ClientV1RestResource
|
||||
from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
|
||||
|
||||
from daemonize import Daemonize
|
||||
import twisted.manhole.telnet
|
||||
|
||||
logger = logging.getLogger("synapse.app.homeserver")
|
||||
import synapse
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import resource
|
||||
import subprocess
|
||||
import sqlite3
|
||||
import syweb
|
||||
|
||||
def gz_wrap(r):
    return EncodingResourceWrapper(r, [GzipEncoderFactory()])


def build_resource_for_web_client(hs):
    webclient_path = hs.get_config().web_client_location
    if not webclient_path:
        try:
            import syweb
        except ImportError:
            quit_with_error(
                "Could not find a webclient.\n\n"
                "Please either install the matrix-angular-sdk or configure\n"
                "the location of the source to serve via the configuration\n"
                "option `web_client_location`\n\n"
                "To install the `matrix-angular-sdk` via pip, run:\n\n"
                "    pip install '%(dep)s'\n"
                "\n"
                "You can also disable hosting of the webclient via the\n"
                "configuration option `web_client`\n"
                % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
            )
        syweb_path = os.path.dirname(syweb.__file__)
        webclient_path = os.path.join(syweb_path, "webclient")
    # GZip is disabled here due to
    # https://twistedmatrix.com/trac/ticket/7678
    # (It can stay enabled for the API resources: they call
    # write() with the whole body and then finish() straight
    # after and so do not trigger the bug.)
    # GzipFile was removed in commit 184ba09
    # return GzipFile(webclient_path)  # TODO configurable?
    return File(webclient_path)  # TODO configurable?

logger = logging.getLogger(__name__)
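
``gz_wrap`` above leans on stock Twisted machinery. As a standalone sketch
(plain Twisted only, nothing Synapse-specific; the resource and port are
illustrative), the same wrapping technique looks like::

    from twisted.internet import reactor
    from twisted.web.resource import EncodingResourceWrapper, Resource
    from twisted.web.server import GzipEncoderFactory, Site

    class PingResource(Resource):
        isLeaf = True

        def render_GET(self, request):
            # API-style resources return the whole body in one go, so the
            # gzip encoder is safe here (unlike for the static File case).
            request.setHeader(b"Content-Type", b"application/json")
            return b'{"pong": true}'

    # Responses are gzip-encoded whenever the client sends
    # Accept-Encoding: gzip; otherwise they pass through unchanged.
    wrapped = EncodingResourceWrapper(PingResource(), [GzipEncoderFactory()])
    reactor.listenTCP(8080, Site(wrapped))
    reactor.run()
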
class SynapseHomeServer(HomeServer):
    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        tls = listener_config.get("tls", False)
        site_tag = listener_config.get("tag", port)

        if tls and config.no_tls:
            return
    def build_http_client(self):
        return MatrixFederationHttpClient(self)

        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "client":
                    client_resource = ClientRestResource(self)
                    if res["compress"]:
                        client_resource = gz_wrap(client_resource)
    def build_resource_for_client(self):
        return ClientV1RestResource(self)

                    resources.update({
                        "/_matrix/client/api/v1": client_resource,
                        "/_matrix/client/r0": client_resource,
                        "/_matrix/client/unstable": client_resource,
                        "/_matrix/client/v2_alpha": client_resource,
                        "/_matrix/client/versions": client_resource,
                    })
    def build_resource_for_client_v2_alpha(self):
        return ClientV2AlphaRestResource(self)

                if name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })
    def build_resource_for_federation(self):
        return JsonResource(self)

                if name in ["static", "client"]:
                    resources.update({
                        STATIC_PREFIX: File(
                            os.path.join(os.path.dirname(synapse.__file__), "static")
                        ),
                    })
    def build_resource_for_app_services(self):
        return AppServiceRestResource(self)

                if name in ["media", "federation", "client"]:
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })
    def build_resource_for_web_client(self):
        syweb_path = os.path.dirname(syweb.__file__)
        webclient_path = os.path.join(syweb_path, "webclient")
        return File(webclient_path)  # TODO configurable?

                if name in ["keys", "federation"]:
                    resources.update({
                        SERVER_KEY_PREFIX: LocalKey(self),
                        SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
                    })
    def build_resource_for_static_content(self):
        return File("static")

                if name == "webclient":
                    resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

                if name == "metrics" and self.get_config().enable_metrics:
                    resources[METRICS_PREFIX] = MetricsResource(self)

                if name == "replication":
                    resources[REPLICATION_PREFIX] = ReplicationResource(self)

        if WEB_CLIENT_PREFIX in resources:
            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
        else:
            root_resource = Resource()

        root_resource = create_resource_tree(resources, root_resource)
        if tls:
            reactor.listenSSL(
                port,
                SynapseSite(
                    "synapse.access.https.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                self.tls_server_context_factory,
                interface=bind_address
            )
        else:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=bind_address
            )
        logger.info("Synapse now listening on port %d", port)
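
``_listener_http`` consumes one entry of ``config.listeners`` at a time. A
hypothetical entry, written as the Python dict shape the method reads (the
keys are those used in the code above; the values are made up for
illustration)::

    listener_config = {
        "port": 8448,
        "bind_address": "0.0.0.0",
        "tls": True,                 # skipped entirely when config.no_tls is set
        "tag": "client_federation",  # used in the access-log name; defaults to the port
        "resources": [
            {"names": ["client"], "compress": True},   # gz_wrap()s the client resource
            {"names": ["federation", "keys"], "compress": False},
        ],
    }
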
    def start_listening(self):
        config = self.get_config()

        for listener in config.listeners:
            if listener["type"] == "http":
                self._listener_http(config, listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
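
A matching ``manhole`` entry, in the same illustrative dict form (the keys
are those read by ``start_listening`` above; the values are made up)::

    manhole_listener = {
        "type": "manhole",
        "port": 9000,
        "bind_address": "127.0.0.1",  # defaults to loopback in the code above
    }
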
    def run_startup_checks(self, db_conn, database_engine):
        all_users_native = are_all_users_on_domain(
            db_conn.cursor(), database_engine, self.hostname
    def build_resource_for_content_repo(self):
        return ContentRepoResource(
            self, self.upload_dir, self.auth, self.content_addr
        )
        if not all_users_native:
            quit_with_error(
                "Found users in database not native to %s!\n"
                "You cannot change a synapse server_name after it has been configured"
                % (self.hostname,)

    def build_resource_for_media_repository(self):
        return MediaRepositoryResource(self)

    def build_resource_for_server_key(self):
        return LocalKey(self)

    def build_db_pool(self):
        return adbapi.ConnectionPool(
            "sqlite3", self.get_db_name(),
            check_same_thread=False,
            cp_min=1,
            cp_max=1,
            cp_openfun=prepare_database,  # Prepare the database for each conn
                                          # so that :memory: sqlite works
        )

    def create_resource_tree(self, web_client, redirect_root_to_web_client):
        """Create the resource tree for this Home Server.

        This is unduly complicated because Twisted does not support putting
        child resources more than 1 level deep at a time.

        Args:
            web_client (bool): True to enable the web client.
            redirect_root_to_web_client (bool): True to redirect '/' to the
                location of the web client. This does nothing if web_client is
                not True.
        """
        # list containing (path_str, Resource) e.g:
        # [ ("/aaa/bbb/cc", Resource1), ("/aaa/dummy", Resource2) ]
        desired_tree = [
            (CLIENT_PREFIX, self.get_resource_for_client()),
            (CLIENT_V2_ALPHA_PREFIX, self.get_resource_for_client_v2_alpha()),
            (FEDERATION_PREFIX, self.get_resource_for_federation()),
            (CONTENT_REPO_PREFIX, self.get_resource_for_content_repo()),
            (SERVER_KEY_PREFIX, self.get_resource_for_server_key()),
            (MEDIA_PREFIX, self.get_resource_for_media_repository()),
            (APP_SERVICE_PREFIX, self.get_resource_for_app_services()),
            (STATIC_PREFIX, self.get_resource_for_static_content()),
        ]

        if web_client:
            logger.info("Adding the web client.")
            desired_tree.append((WEB_CLIENT_PREFIX,
                                 self.get_resource_for_web_client()))

        if web_client and redirect_root_to_web_client:
            self.root_resource = RootRedirect(WEB_CLIENT_PREFIX)
        else:
            self.root_resource = Resource()

        # ideally we'd just use getChild and putChild but getChild doesn't work
        # unless you give it a Request object IN ADDITION to the name :/ So
        # instead, we'll store a copy of this mapping so we can actually add
        # extra resources to existing nodes. See self._resource_id for the key.
        resource_mappings = {}
        for full_path, res in desired_tree:
            logger.info("Attaching %s to path %s", res, full_path)
            last_resource = self.root_resource
            for path_seg in full_path.split('/')[1:-1]:
                if path_seg not in last_resource.listNames():
                    # resource doesn't exist, so make a "dummy resource"
                    child_resource = Resource()
                    last_resource.putChild(path_seg, child_resource)
                    res_id = self._resource_id(last_resource, path_seg)
                    resource_mappings[res_id] = child_resource
                    last_resource = child_resource
                else:
                    # we have an existing Resource, use that instead.
                    res_id = self._resource_id(last_resource, path_seg)
                    last_resource = resource_mappings[res_id]

            # ===========================
            # now attach the actual desired resource
            last_path_seg = full_path.split('/')[-1]

            # if there is already a resource here, thieve its children and
            # replace it
            res_id = self._resource_id(last_resource, last_path_seg)
            if res_id in resource_mappings:
                # there is a dummy resource at this path already, which needs
                # to be replaced with the desired resource.
                existing_dummy_resource = resource_mappings[res_id]
                for child_name in existing_dummy_resource.listNames():
                    child_res_id = self._resource_id(existing_dummy_resource,
                                                     child_name)
                    child_resource = resource_mappings[child_res_id]
                    # steal the children
                    res.putChild(child_name, child_resource)

            # finally, insert the desired resource in the right place
            last_resource.putChild(last_path_seg, res)
            res_id = self._resource_id(last_resource, last_path_seg)
            resource_mappings[res_id] = res

        return self.root_resource

    def _resource_id(self, resource, path_seg):
        """Construct an arbitrary resource ID so you can retrieve the mapping
        later.

        If you want to represent resource A putChild resource B with path C,
        the mapping should look like _resource_id(A,C) = B.

        Args:
            resource (Resource): The *parent* Resource
            path_seg (str): The name of the child Resource to be attached.
        Returns:
            str: A unique string which can be a key to the child Resource.
        """
        return "%s-%s" % (resource, path_seg)
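
The docstring's point about Twisted only attaching children one level deep
is the whole reason for the dummy-resource bookkeeping above. The technique
in miniature, using only ``twisted.web.resource.Resource`` (a standalone
sketch against Python 2-era Twisted, not the Synapse method itself)::

    from twisted.web.resource import Resource

    root = Resource()
    mappings = {}  # "<parent>-<segment>" -> Resource, as in _resource_id()

    def attach(root, full_path, res):
        # Walk /a/b/c one segment at a time, creating placeholder nodes
        # for the intermediate segments that don't exist yet.
        last = root
        for seg in full_path.split('/')[1:-1]:
            if seg not in last.listNames():
                child = Resource()
                last.putChild(seg, child)
                mappings["%s-%s" % (last, seg)] = child
                last = child
            else:
                last = mappings["%s-%s" % (last, seg)]
        # Attach the real resource at the final segment.
        last.putChild(full_path.split('/')[-1], res)

    attach(root, "/_matrix/client/api/v1", Resource())
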
    def start_listening(self, secure_port, unsecure_port):
        if secure_port is not None:
            reactor.listenSSL(
                secure_port, Site(self.root_resource), self.tls_context_factory
            )
            logger.info("Synapse now listening on port %d", secure_port)
        if unsecure_port is not None:
            reactor.listenTCP(
                unsecure_port, Site(self.root_resource)
            )
            logger.info("Synapse now listening on port %d", unsecure_port)

def get_version_string():
    try:
        null = open(os.devnull, 'w')
        cwd = os.path.dirname(os.path.abspath(__file__))
        try:
            git_branch = subprocess.check_output(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_branch = "b=" + git_branch
        except subprocess.CalledProcessError:
            git_branch = ""

        try:
            database_engine.check_database(db_conn.cursor())
        except IncorrectDatabaseSetup as e:
            quit_with_error(e.message)
            git_tag = subprocess.check_output(
                ['git', 'describe', '--exact-match'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_tag = "t=" + git_tag
        except subprocess.CalledProcessError:
            git_tag = ""

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)
        try:
            git_commit = subprocess.check_output(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
        except subprocess.CalledProcessError:
            git_commit = ""

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn
        try:
            dirty_string = "-this_is_a_dirty_checkout"
            is_dirty = subprocess.check_output(
                ['git', 'describe', '--dirty=' + dirty_string],
                stderr=null,
                cwd=cwd,
            ).strip().endswith(dirty_string)

            git_dirty = "dirty" if is_dirty else ""
        except subprocess.CalledProcessError:
            git_dirty = ""

        if git_branch or git_tag or git_commit or git_dirty:
            git_version = ",".join(
                s for s in
                (git_branch, git_tag, git_commit, git_dirty,)
                if s
            )

            return (
                "Synapse/%s (%s)" % (
                    synapse.__version__, git_version,
                )
            ).encode("ascii")
    except Exception as e:
        logger.warn("Failed to check for git repository: %s", e)

    return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")

def quit_with_error(error_string):
    message_lines = error_string.split("\n")
    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
    sys.stderr.write("*" * line_length + '\n')
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
    sys.stderr.write("*" * line_length + '\n')
    sys.exit(1)

def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually
            `sys.argv[1:]`.

    Returns:
        HomeServer
    """
def change_resource_limit(soft_file_no):
    try:
    config = HomeServerConfig.load_or_generate_config(
        "Synapse Homeserver",
        config_options,
    )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)
        if not soft_file_no:
            soft_file_no = hard

        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))

        logger.info("Set file limit to: %d", soft_file_no)
    except (ValueError, resource.error) as e:
        logger.warn("Failed to set file limit: %s", e)

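
The ``change_resource_limit`` fragments interleaved through ``setup`` above
come from ``synapse.util.rlimit``; pieced back together as a minimal
standalone sketch (assuming the fragments join as shown)::

    import logging
    import resource

    logger = logging.getLogger(__name__)

    def change_resource_limit(soft_file_no):
        try:
            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

            if not soft_file_no:
                # A falsy value means "raise the soft limit to the hard limit".
                soft_file_no = hard

            resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
            logger.info("Set file limit to: %d", soft_file_no)
        except (ValueError, resource.error) as e:
            logger.warn("Failed to set file limit: %s", e)
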
def setup():
    config = HomeServerConfig.load_config(
        "Synapse Homeserver",
        sys.argv[1:],
        generate_section="Homeserver"
    )

    config.setup_logging()

    # check any extra requirements we have now we have a config
    check_requirements(config)
    check_requirements()

    version_string = get_version_string("Synapse", synapse)
    version_string = get_version_string()

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    if re.search(":[0-9]+$", config.server_name):
        domain_with_port = config.server_name
    else:
        domain_with_port = "%s:%s" % (config.server_name, config.bind_port)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
    tls_context_factory = context_factory.ServerContextFactory(config)

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        domain_with_port=domain_with_port,
        upload_dir=os.path.abspath("uploads"),
        db_name=config.database_path,
        tls_context_factory=tls_context_factory,
        config=config,
        content_addr=config.content_addr,
        version_string=version_string,
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])
    hs.create_resource_tree(
        web_client=config.webclient,
        redirect_root_to_web_client=True,
    )

    db_name = hs.get_db_name()

    logger.info("Preparing database: %s...", db_name)

    try:
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)

        hs.run_startup_checks(db_conn, database_engine)

        db_conn.commit()
    with sqlite3.connect(db_name) as db_conn:
        prepare_sqlite3_database(db_conn)
        prepare_database(db_conn)
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
@@ -324,121 +350,33 @@ def setup(config_options):
        )
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])
    logger.info("Database prepared in %s.", db_name)

    hs.setup()
    hs.start_listening()
    if config.manhole:
        f = twisted.manhole.telnet.ShellFactory()
        f.username = "matrix"
        f.password = "rabbithole"
        f.namespace['hs'] = hs
        reactor.listenTCP(config.manhole, f, interface='127.0.0.1')

    def start():
        hs.get_pusherpool().start()
        hs.get_state_handler().start_caching()
        hs.get_datastore().start_profiling()
        hs.get_datastore().start_doing_background_updates()
        hs.get_replication_layer().start_get_pdu_cache()
    bind_port = config.bind_port
    if config.no_tls:
        bind_port = None

        register_memory_metrics(hs)
    hs.start_listening(bind_port, config.unsecure_port)

    reactor.callWhenRunning(start)
    hs.get_pusherpool().start()
    hs.get_state_handler().start_caching()
    hs.get_datastore().start_profiling()
    hs.get_replication_layer().start_get_pdu_cache()

    return hs

class SynapseService(service.Service):
    """A twisted Service class that will start synapse. Used to run synapse
    via twistd and a .tac.
    """
    def __init__(self, config):
        self.config = config

    def startService(self):
        hs = setup(self.config)
        change_resource_limit(hs.config.soft_file_limit)
        if hs.config.gc_thresholds:
            gc.set_threshold(*hs.config.gc_thresholds)

    def stopService(self):
        return self._port.stopListening()

def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    start_time = hs.get_clock().time()

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats = {}
        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push",
                stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        phone_home_task = task.LoopingCall(phone_stats_home)
        logger.info("Scheduling stats reporting for 24 hour intervals")
        phone_home_task.start(60 * 60 * 24, now=False)
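
``phone_stats_home`` is scheduled with ``twisted.internet.task.LoopingCall``;
the bare scheduling pattern, as a self-contained sketch (``report_stats`` is
a stand-in, not a Synapse function)::

    from twisted.internet import reactor, task

    def report_stats():
        print("gathering and reporting stats...")

    loop = task.LoopingCall(report_stats)
    # Fire every 24 hours; now=False skips the immediate first call,
    # exactly as in the code above.
    loop.start(60 * 60 * 24, now=False)
    reactor.run()
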
    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)
        with LoggingContext("run"):
            change_resource_limit(hs.config.soft_file_limit)
            if hs.config.gc_thresholds:
                gc.set_threshold(*hs.config.gc_thresholds)
            reactor.run()

    if hs.config.daemonize:

        if hs.config.print_pidfile:
            print (hs.config.pid_file)
    if config.daemonize:
        print config.pid_file

        daemon = Daemonize(
            app="synapse-homeserver",
            pid=hs.config.pid_file,
            action=lambda: in_thread(),
            pid=config.pid_file,
            action=lambda: run(config),
            auto_close_fds=False,
            verbose=True,
            logger=logger,
@@ -446,15 +384,20 @@ def run(hs):

        daemon.start()
    else:
        in_thread()
        run(config)


def run(config):
    with LoggingContext("run"):
        change_resource_limit(config.soft_file_limit)

        reactor.run()


def main():
    with LoggingContext("main"):
        # check base requirements
        check_requirements()
        hs = setup(sys.argv[1:])
        run(hs)
        setup()


if __name__ == '__main__':

@@ -1,314 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.pusher")

class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )

    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )

    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )

    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )

    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )

    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )

    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )

    # XXX: This is a bit broken because we don't persist forgotten rooms
    # in a way that they can be streamed. This means that we don't have a
    # way to invalidate the forgotten rooms cache correctly.
    # For now we expire the cache every hour (BROKEN_CACHE_EXPIRY_MS below).
    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

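
``PusherSlaveStore`` lifts implementations off ``DataStore`` with
``.__func__`` so the plain function re-binds against the slaved class.
The Python 2 idiom in isolation (toy classes, not the real stores)::

    class FullStore(object):
        def get_thing(self):
            return "thing from %s" % type(self).__name__

    class SlavedStore(object):
        # .__func__ unwraps the unbound method (Python 2) so the underlying
        # function becomes a method of SlavedStore instead.
        get_thing = FullStore.get_thing.__func__

    print(SlavedStore().get_thing())  # -> "thing from SlavedStore"
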
class PusherServer(HomeServer):

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        http_client = self.get_simple_http_client()
        replication_url = self.config.worker_replication_url
        url = replication_url + "/remove_pushers"
        return http_client.post_json_get_json(url, {
            "remove": [{
                "app_id": app_id,
                "push_key": push_key,
                "user_id": user_id,
            }]
        })

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse pusher now listening on port %d", port)
    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()
        clock = self.get_clock()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

        @defer.inlineCallbacks
        def poke_pushers(results):
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
                    min_stream_id, max_stream_id
                )

            stream = results.get("receipts")
            if stream:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(
                    min_stream_id, max_stream_id, affected_room_ids
                )

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                poke_pushers(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)

def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.pusher"

    setup_logging(config.worker_log_config, config.worker_log_file)

    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``start_pushers: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.start_pushers = True

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string=get_version_string("Synapse", synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.replicate()
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-pusher",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        ps = start(sys.argv[1:])
@@ -1,465 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import FrozenEvent
from synapse.handlers.presence import PresenceHandler
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import PresenceStore, UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import contextlib
import gc
import ujson as json

logger = logging.getLogger("synapse.app.synchrotron")

class SynchrotronSlavedStore(
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedReceiptsStore,
    SlavedAccountDataStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    # XXX: This is a bit broken because we don't persist forgotten rooms
    # in a way that they can be streamed. This means that we don't have a
    # way to invalidate the forgotten rooms cache correctly.
    # For now we expire the cache every hour (BROKEN_CACHE_EXPIRY_MS below).
    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

    # XXX: This is a bit broken because we don't persist the accepted list in a
    # way that can be replicated. This means that we don't have a way to
    # invalidate the cache correctly.
    get_presence_list_accepted = PresenceStore.__dict__[
        "get_presence_list_accepted"
    ]

UPDATE_SYNCING_USERS_MS = 10 * 1000

class SynchrotronPresence(object):
    def __init__(self, hs):
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        self.user_to_num_current_syncs = {}
        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
        self.clock = hs.get_clock()

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {
            state.user_id: state
            for state in active_presence
        }

        self.process_id = random_string(16)
        logger.info("Presence process_id is %r", self.process_id)

        self._sending_sync = False
        self._need_to_send_sync = False
        self.clock.looping_call(
            self._send_syncing_users_regularly,
            UPDATE_SYNCING_USERS_MS,
        )

        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    def set_state(self, user, state):
        # TODO: How's this supposed to work?
        pass

    get_states = PresenceHandler.get_states.__func__
    current_state_for_users = PresenceHandler.current_state_for_users.__func__

    @defer.inlineCallbacks
    def user_syncing(self, user_id, affect_presence):
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1
            prev_states = yield self.current_state_for_users([user_id])
            if prev_states[user_id].state == PresenceState.OFFLINE:
                # TODO: Don't block the sync request on this HTTP hit.
                yield self._send_syncing_users_now()

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if affect_presence and user_id in self.user_to_num_current_syncs:
                self.user_to_num_current_syncs[user_id] -= 1

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        defer.returnValue(_user_syncing())

    @defer.inlineCallbacks
    def _on_shutdown(self):
        # When the synchrotron is shutdown tell the master to clear the in
        # progress syncs for this process
        self.user_to_num_current_syncs.clear()
        yield self._send_syncing_users_now()

    def _send_syncing_users_regularly(self):
        # Only send an update if we aren't in the middle of sending one.
        if not self._sending_sync:
            preserve_fn(self._send_syncing_users_now)()

    @defer.inlineCallbacks
    def _send_syncing_users_now(self):
        if self._sending_sync:
            # We don't want to race with sending another update.
            # Instead we wait for that update to finish and send another
            # update afterwards.
            self._need_to_send_sync = True
            return

        # Flag that we are sending an update.
        self._sending_sync = True

        yield self.http_client.post_json_get_json(self.syncing_users_url, {
            "process_id": self.process_id,
            "syncing_users": [
                user_id for user_id, count in self.user_to_num_current_syncs.items()
                if count > 0
            ],
        })

        # Unset the flag as we are no longer sending an update.
        self._sending_sync = False
        if self._need_to_send_sync:
            # If something happened while we were sending the update then
            # we might need to send another update.
            # TODO: Check if the update that was sent matches the current state
            # as we only need to send an update if they are different.
            self._need_to_send_sync = False
            yield self._send_syncing_users_now()
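
``_send_syncing_users_now`` coalesces overlapping sends with the
``_sending_sync``/``_need_to_send_sync`` pair. The same pattern stripped to
its bones (a generic ``inlineCallbacks`` sketch, not the Synapse class;
``do_send`` is a stand-in returning a Deferred)::

    from twisted.internet import defer

    class Sender(object):
        def __init__(self, do_send):
            self._do_send = do_send
            self._sending = False
            self._need_resend = False

        @defer.inlineCallbacks
        def send(self):
            if self._sending:
                # A send is already in flight: just remember to go again.
                self._need_resend = True
                return
            self._sending = True
            yield self._do_send()
            self._sending = False
            if self._need_resend:
                # State may have changed while we were sending; send once more.
                self._need_resend = False
                yield self.send()
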
    def process_replication(self, result):
        stream = result.get("presence", {"rows": []})
        for row in stream["rows"]:
            (
                position, user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            ) = row
            self.user_to_current_state[user_id] = UserPresenceState(
                user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            )


class SynchrotronTyping(object):
    def __init__(self, hs):
        self._latest_room_serial = 0
        self._room_serials = {}
        self._room_typing = {}

    def stream_positions(self):
        return {"typing": self._latest_room_serial}

    def process_replication(self, result):
        stream = result.get("typing")
        if stream:
            self._latest_room_serial = int(stream["position"])

            for row in stream["rows"]:
                position, room_id, typing_json = row
                typing = json.loads(typing_json)
                self._room_serials[room_id] = position
                self._room_typing[room_id] = typing


class SynchrotronApplicationService(object):
    def notify_interested_services(self, event):
        pass

class SynchrotronServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    sync.register_servlets(self, resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse synchrotron now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        clock = self.get_clock()
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()
            store.get_presence_list_accepted.invalidate_all()

        def notify_from_stream(
            result, stream_name, stream_key, room=None, user=None
        ):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index],)

                    if room:
                        rooms = (row[room_index],)

                    notifier.on_new_event(
                        stream_key, position, users=users, rooms=rooms
                    )

        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]
                for row in stream["rows"]:
                    position = row[0]
                    internal = json.loads(row[1])
                    event_json = json.loads(row[2])
                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    notifier.on_new_room_event(
                        event, position, max_position, extra_users
                    )

            notify_from_stream(
                result, "push_rules", "push_rules_key", user="user_id"
            )
            notify_from_stream(
                result, "user_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "room_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "tag_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "receipts", "receipt_key", room="room_id"
            )
            notify_from_stream(
                result, "typing", "typing_key", room="room_id"
            )

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                presence_handler.process_replication(result)
                notify(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)

    def build_presence_handler(self):
        return SynchrotronPresence(self)

    def build_typing_handler(self):
        return SynchrotronTyping(self)

def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse synchrotron", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.synchrotron"

    setup_logging(config.worker_log_config, config.worker_log_file)

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string=get_version_string("Synapse", synapse),
        database_engine=database_engine,
        application_service_handler=SynchrotronApplicationService(),
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-synchrotron",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,198 +14,56 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import collections
import glob
import os
import os.path
import signal
import subprocess
import sys
import yaml
import os
import subprocess
import signal

SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]

CONFIGFILE = "homeserver.yaml"
PIDFILE = "homeserver.pid"

GREEN = "\x1b[1;32m"
RED = "\x1b[1;31m"
NORMAL = "\x1b[m"

def write(message, colour=NORMAL, stream=sys.stdout):
    if colour == NORMAL:
        stream.write(message + "\n")
    else:
        stream.write(colour + message + NORMAL + "\n")


def start(configfile):
    write("Starting ...")
    args = SYNAPSE
    args.extend(["--daemonize", "-c", configfile])

    try:
        subprocess.check_call(args)
        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
    except subprocess.CalledProcessError as e:
        write(
            "error starting (exit code: %d); see above for logs" % e.returncode,
            colour=RED,
        )


def start_worker(app, configfile, worker_configfile):
    args = [
        "python", "-B",
        "-m", app,
        "-c", configfile,
        "-c", worker_configfile
    ]

    try:
        subprocess.check_call(args)
        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
    except subprocess.CalledProcessError as e:
        write(
            "error starting %s(%r) (exit code: %d); see above for logs" % (
                app, worker_configfile, e.returncode,
            ),
            colour=RED,
        )


def stop(pidfile, app):
    if os.path.exists(pidfile):
        pid = int(open(pidfile).read())
        os.kill(pid, signal.SIGTERM)
        write("stopped %s" % (app,), colour=GREEN)


Worker = collections.namedtuple("Worker", [
    "app", "configfile", "pidfile", "cache_factor"
])

def main():
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"action",
|
||||
choices=["start", "stop", "restart"],
|
||||
help="whether to start, stop or restart the synapse",
|
||||
)
|
||||
parser.add_argument(
|
||||
"configfile",
|
||||
nargs="?",
|
||||
default="homeserver.yaml",
|
||||
help="the homeserver config file, defaults to homserver.yaml",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-w", "--worker",
|
||||
metavar="WORKERCONFIG",
|
||||
help="start or stop a single worker",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-a", "--all-processes",
|
||||
metavar="WORKERCONFIGDIR",
|
||||
help="start or stop all the workers in the given directory"
|
||||
" and the main synapse process",
|
||||
)
|
||||
|
||||
options = parser.parse_args()
|
||||
|
||||
    if options.worker and options.all_processes:
        write(
            'Cannot use "--worker" with "--all-processes"',
            stream=sys.stderr
        )
        sys.exit(1)

    configfile = options.configfile

    if not os.path.exists(configfile):
        write(
def start():
    if not os.path.exists(CONFIGFILE):
        sys.stderr.write(
            "No config file found\n"
            "To generate a config file, run '%s -c %s --generate-config"
            " --server-name=<server name>'\n" % (
                " ".join(SYNAPSE), options.configfile
            ),
            stream=sys.stderr,
                " ".join(SYNAPSE), CONFIGFILE
            )
        )
        sys.exit(1)
    print "Starting ...",
    args = SYNAPSE
    args.extend(["--daemonize", "-c", CONFIGFILE, "--pid-file", PIDFILE])
    subprocess.check_call(args)
    print GREEN + "started" + NORMAL

    with open(configfile) as stream:
        config = yaml.load(stream)

    pidfile = config["pid_file"]
    cache_factor = config.get("synctl_cache_factor")
    start_stop_synapse = True
def stop():
    if os.path.exists(PIDFILE):
        pid = int(open(PIDFILE).read())
        os.kill(pid, signal.SIGTERM)
        print GREEN + "stopped" + NORMAL

    if cache_factor:
        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

    worker_configfiles = []
    if options.worker:
        start_stop_synapse = False
        worker_configfile = options.worker
        if not os.path.exists(worker_configfile):
            write(
                "No worker config found at %r" % (worker_configfile,),
                stream=sys.stderr,
            )
            sys.exit(1)
        worker_configfiles.append(worker_configfile)

    if options.all_processes:
        worker_configdir = options.all_processes
        if not os.path.isdir(worker_configdir):
            write(
                "No worker config directory found at %r" % (worker_configdir,),
                stream=sys.stderr,
            )
            sys.exit(1)
        worker_configfiles.extend(sorted(glob.glob(
            os.path.join(worker_configdir, "*.yaml")
        )))

    workers = []
    for worker_configfile in worker_configfiles:
        with open(worker_configfile) as stream:
            worker_config = yaml.load(stream)
        worker_app = worker_config["worker_app"]
        worker_pidfile = worker_config["worker_pid_file"]
        worker_daemonize = worker_config["worker_daemonize"]
        assert worker_daemonize  # TODO print something more user friendly
        worker_cache_factor = worker_config.get("synctl_cache_factor")
        workers.append(Worker(
            worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
        ))

    action = options.action

    if action == "stop" or action == "restart":
        for worker in workers:
            stop(worker.pidfile, worker.app)

        if start_stop_synapse:
            stop(pidfile, "synapse.app.homeserver")

    # TODO: Wait for synapse to actually shutdown before starting it again

    if action == "start" or action == "restart":
        if start_stop_synapse:
            start(configfile)

        for worker in workers:
            if worker.cache_factor:
                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)

            start_worker(worker.app, configfile, worker.configfile)

            if cache_factor:
                os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
            else:
                os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
def main():
    action = sys.argv[1] if sys.argv[1:] else "usage"
    if action == "start":
        start()
    elif action == "stop":
        stop()
    elif action == "restart":
        stop()
        start()
    else:
        sys.stderr.write("Usage: %s [start|stop|restart]\n" % (sys.argv[0],))
        sys.exit(1)


if __name__ == "__main__":
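For context, the worker loop in the newer synctl above only reads a handful of keys from each worker YAML file. A minimal sketch of the dict it expects back from yaml.load() — the app name and pid path are illustrative placeholders, not values taken from this diff:

    # Hypothetical parsed worker config satisfying the loop above.
    worker_config = {
        "worker_app": "synapse.app.synchrotron",        # illustrative app name
        "worker_pid_file": "/var/run/synchrotron.pid",  # illustrative path
        "worker_daemonize": True,                       # asserted by synctl
        "synctl_cache_factor": 2.0,                     # optional
    }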
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,50 +20,6 @@ import re
logger = logging.getLogger(__name__)


class ApplicationServiceState(object):
    DOWN = "down"
    UP = "up"


class AppServiceTransaction(object):
    """Represents an application service transaction."""

    def __init__(self, service, id, events):
        self.service = service
        self.id = id
        self.events = events

    def send(self, as_api):
        """Sends this transaction using the provided AS API interface.

        Args:
            as_api(ApplicationServiceApi): The API to use to send.
        Returns:
            A Deferred which resolves to True if the transaction was sent.
        """
        return as_api.push_bulk(
            service=self.service,
            events=self.events,
            txn_id=self.id
        )

    def complete(self, store):
        """Completes this transaction as successful.

        Marks this transaction ID on the application service and removes the
        transaction contents from the database.

        Args:
            store: The database store to operate on.
        Returns:
            A Deferred which resolves to True if the transaction was completed.
        """
        return store.complete_appservice_txn(
            service=self.service,
            txn_id=self.id
        )


class ApplicationService(object):
    """Defines an application service. This definition is mostly what is
    provided to the /register AS API.
@@ -79,13 +35,13 @@ class ApplicationService(object):
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None):
                 sender=None, txn_id=None):
        self.token = token
        self.url = url
        self.hs_token = hs_token
        self.sender = sender
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id
        self.txn_id = txn_id

    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
@@ -95,7 +51,7 @@ class ApplicationService(object):
        #   rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        # }
        if not namespaces:
            namespaces = {}
            return None

        for ns in ApplicationService.NS_LIST:
            if ns not in namespaces:
@@ -148,8 +104,8 @@ class ApplicationService(object):
                and self.is_interested_in_user(event.state_key)):
            return True
        # check joined member events
        for user_id in member_list:
            if self.is_interested_in_user(user_id):
        for member in member_list:
            if self.is_interested_in_user(member.state_key):
                return True
        return False

@@ -173,7 +129,7 @@ class ApplicationService(object):
            restrict_to(str): The namespace to restrict regex tests to.
            aliases_for_event(list): A list of all the known room aliases for
                this event.
            member_list(list): A list of all joined user_ids in this room.
            member_list(list): A list of all joined room members in this room.
        Returns:
            bool: True if this service would like to know about this event.
        """
@@ -199,10 +155,7 @@ class ApplicationService(object):
        return self._matches_user(event, member_list)

    def is_interested_in_user(self, user_id):
        return (
            self._matches_regex(user_id, ApplicationService.NS_USERS)
            or user_id == self.sender
        )
        return self._matches_regex(user_id, ApplicationService.NS_USERS)

    def is_interested_in_alias(self, alias):
        return self._matches_regex(alias, ApplicationService.NS_ALIASES)
@@ -211,10 +164,7 @@ class ApplicationService(object):
        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)

    def is_exclusive_user(self, user_id):
        return (
            self._is_exclusive(ApplicationService.NS_USERS, user_id)
            or user_id == self.sender
        )
        return self._is_exclusive(ApplicationService.NS_USERS, user_id)

    def is_exclusive_alias(self, alias):
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
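As a rough illustration of the namespace regex matching that is_interested_in_user() relies on — a standalone sketch, not Synapse's actual _matches_regex implementation:

    import re

    # One hypothetical "users" namespace entry of the documented shape.
    namespaces = {"users": [{"regex": "@irc_.*", "exclusive": True}]}

    def is_interested_in_user(user_id):
        # True if any user-namespace regex matches the user ID.
        return any(re.match(obj["regex"], user_id) for obj in namespaces["users"])

    print(is_interested_in_user("@irc_alice:example.org"))  # True
    print(is_interested_in_user("@bob:example.org"))        # False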
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    pushing.
    """

    def __init__(self, hs):
    def __init__(self, hs):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

@@ -72,19 +72,14 @@ class ApplicationServiceApi(SimpleHttpClient):
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
    def push_bulk(self, service, events):
        events = self._serialize(events)

        if txn_id is None:
            logger.warning("push_bulk: Missing txn ID sending events to %s",
                           service.url)
            txn_id = str(0)
        txn_id = str(txn_id)

        uri = service.url + ("/transactions/%s" %
                             urllib.quote(txn_id))
                             urllib.quote(str(0)))  # TODO txn_ids
        response = None
        try:
            yield self.put_json(
            response = yield self.put_json(
                uri=uri,
                json_body={
                    "events": events
@@ -92,14 +87,20 @@ class ApplicationServiceApi(SimpleHttpClient):
                args={
                    "access_token": service.hs_token
                })
            defer.returnValue(True)
            return
            if response:  # just an empty json object
                # TODO: Mark txn as sent successfully
                defer.returnValue(True)
        except CodeMessageException as e:
            logger.warning("push_bulk to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("push_bulk to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push(self, service, event):
        response = yield self.push_bulk(service, [event])
        defer.returnValue(response)

    def _serialize(self, events):
        time_now = self.clock.time_msec()
        return [
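Pieced together from the code above, the homeserver-to-AS request that push_bulk() issues looks roughly like this (a sketch; the URL and tokens are placeholders):

    # PUT <service.url>/transactions/<txn_id>?access_token=<service.hs_token>
    #
    # {
    #     "events": [ ...events serialized by _serialize()... ]
    # }
    #
    # An empty 200 JSON response marks the transaction as delivered.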
@@ -1,254 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module controls the reliability for application service transactions.

The nominal flow through this module looks like:
              __________
1---ASa[e]-->|  Service |--> Queue ASa[f]
2----ASb[e]->|  Queuer  |
3--ASa[f]--->|__________|-----------+ ASa[e], ASb[e]
                                    V
      -````````-            +------------+
      |````````|<--StoreTxn-|Transaction |
      |Database|            | Controller |---> SEND TO AS
      `--------`            +------------+
What happens on SEND TO AS depends on the state of the Application Service:
 - If the AS is marked as DOWN, do nothing.
 - If the AS is marked as UP, send the transaction.
     * SUCCESS : Increment where the AS is up to txn-wise and nuke the txn
                 contents from the db.
     * FAILURE : Marked AS as DOWN and start Recoverer.

Recoverer attempts to recover ASes who have died. The flow for this looks like:
                ,--------------------- backoff++ --------------.
               V                                               |
START ---> Wait exp ------> Get oldest txn ID from ----> FAILURE
             backoff           DB and try to send it
                                 ^                |___________
Mark AS as                       |                            V
UP & quit           +---------- YES                       SUCCESS
    |               |                                         |
    NO <--- Have more txns? <------ Mark txn success & nuke <-+
                                  from db; incr AS pos.
                                     Reset backoff.

This is all tied together by the AppServiceScheduler which DIs the required
components.
"""

from synapse.appservice import ApplicationServiceState
from twisted.internet import defer

import logging

logger = logging.getLogger(__name__)


class ApplicationServiceScheduler(object):
    """ Public facing API for this module. Does the required DI to tie the
    components together. This also serves as the "event_pool", which in this
    case is a simple array.
    """

    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.as_api = hs.get_application_service_api()

        def create_recoverer(service, callback):
            return _Recoverer(self.clock, self.store, self.as_api, service, callback)

        self.txn_ctrl = _TransactionController(
            self.clock, self.store, self.as_api, create_recoverer
        )
        self.queuer = _ServiceQueuer(self.txn_ctrl)

    @defer.inlineCallbacks
    def start(self):
        logger.info("Starting appservice scheduler")
        # check for any DOWN ASes and start recoverers for them.
        recoverers = yield _Recoverer.start(
            self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
        )
        self.txn_ctrl.add_recoverers(recoverers)

    def submit_event_for_as(self, service, event):
        self.queuer.enqueue(service, event)


class _ServiceQueuer(object):
    """Queues events for the same application service together, sending
    transactions as soon as possible. Once a transaction is sent successfully,
    this schedules any other events in the queue to run.
    """

    def __init__(self, txn_ctrl):
        self.queued_events = {}  # dict of {service_id: [events]}
        self.pending_requests = {}  # dict of {service_id: Deferred}
        self.txn_ctrl = txn_ctrl

    def enqueue(self, service, event):
        # if this service isn't being sent something
        if not self.pending_requests.get(service.id):
            self._send_request(service, [event])
        else:
            # add to queue for this service
            if service.id not in self.queued_events:
                self.queued_events[service.id] = []
            self.queued_events[service.id].append(event)

    def _send_request(self, service, events):
        # send request and add callbacks
        d = self.txn_ctrl.send(service, events)
        d.addBoth(self._on_request_finish)
        d.addErrback(self._on_request_fail)
        self.pending_requests[service.id] = d

    def _on_request_finish(self, service):
        self.pending_requests[service.id] = None
        # if there are queued events, then send them.
        if (service.id in self.queued_events
                and len(self.queued_events[service.id]) > 0):
            self._send_request(service, self.queued_events[service.id])
            self.queued_events[service.id] = []

    def _on_request_fail(self, err):
        logger.error("AS request failed: %s", err)


class _TransactionController(object):

    def __init__(self, clock, store, as_api, recoverer_fn):
        self.clock = clock
        self.store = store
        self.as_api = as_api
        self.recoverer_fn = recoverer_fn
        # keep track of how many recoverers there are
        self.recoverers = []

    @defer.inlineCallbacks
    def send(self, service, events):
        try:
            txn = yield self.store.create_appservice_txn(
                service=service,
                events=events
            )
            service_is_up = yield self._is_service_up(service)
            if service_is_up:
                sent = yield txn.send(self.as_api)
                if sent:
                    txn.complete(self.store)
                else:
                    self._start_recoverer(service)
        except Exception as e:
            logger.exception(e)
            self._start_recoverer(service)
        # request has finished
        defer.returnValue(service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):
        self.recoverers.remove(recoverer)
        logger.info("Successfully recovered application service AS ID %s",
                    recoverer.service.id)
        logger.info("Remaining active recoverers: %s", len(self.recoverers))
        yield self.store.set_appservice_state(
            recoverer.service,
            ApplicationServiceState.UP
        )

    def add_recoverers(self, recoverers):
        for r in recoverers:
            self.recoverers.append(r)
        if len(recoverers) > 0:
            logger.info("New active recoverers: %s", len(self.recoverers))

    @defer.inlineCallbacks
    def _start_recoverer(self, service):
        yield self.store.set_appservice_state(
            service,
            ApplicationServiceState.DOWN
        )
        logger.info(
            "Application service falling behind. Starting recoverer. AS ID %s",
            service.id
        )
        recoverer = self.recoverer_fn(service, self.on_recovered)
        self.add_recoverers([recoverer])
        recoverer.recover()

    @defer.inlineCallbacks
    def _is_service_up(self, service):
        state = yield self.store.get_appservice_state(service)
        defer.returnValue(state == ApplicationServiceState.UP or state is None)


class _Recoverer(object):

    @staticmethod
    @defer.inlineCallbacks
    def start(clock, store, as_api, callback):
        services = yield store.get_appservices_by_state(
            ApplicationServiceState.DOWN
        )
        recoverers = [
            _Recoverer(clock, store, as_api, s, callback) for s in services
        ]
        for r in recoverers:
            logger.info("Starting recoverer for AS ID %s which was marked as "
                        "DOWN", r.service.id)
            r.recover()
        defer.returnValue(recoverers)

    def __init__(self, clock, store, as_api, service, callback):
        self.clock = clock
        self.store = store
        self.as_api = as_api
        self.service = service
        self.callback = callback
        self.backoff_counter = 1

    def recover(self):
        self.clock.call_later((2 ** self.backoff_counter), self.retry)

    def _backoff(self):
        # cap the backoff to be around 8.5min => (2^9) = 512 secs
        if self.backoff_counter < 9:
            self.backoff_counter += 1
        self.recover()

    @defer.inlineCallbacks
    def retry(self):
        try:
            txn = yield self.store.get_oldest_unsent_txn(self.service)
            if txn:
                logger.info("Retrying transaction %s for AS ID %s",
                            txn.id, txn.service.id)
                sent = yield txn.send(self.as_api)
                if sent:
                    yield txn.complete(self.store)
                    # reset the backoff counter and retry immediately
                    self.backoff_counter = 1
                    yield self.retry()
                else:
                    self._backoff()
            else:
                self._set_service_recovered()
        except Exception as e:
            logger.exception(e)
            self._backoff()

    def _set_service_recovered(self):
        self.callback(self)
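To make the recover()/_backoff() interplay above concrete, here is a standalone sketch of the resulting retry schedule: delays double from 2**1 seconds and are capped at 2**9 = 512 seconds, the ~8.5 minutes mentioned in the comment:

    delays = []
    backoff_counter = 1
    for _ in range(12):  # twelve consecutive failures
        delays.append(2 ** backoff_counter)  # what recover() would schedule
        if backoff_counter < 9:              # what _backoff() would do
            backoff_counter += 1
    print(delays)  # [2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512]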
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import ConfigError

if __name__ == "__main__":
    import sys
    from homeserver import HomeServerConfig

    action = sys.argv[1]

    if action == "read":
        key = sys.argv[2]
        try:
            config = HomeServerConfig.load_config("", sys.argv[3:])
        except ConfigError as e:
            sys.stderr.write("\n" + e.message + "\n")
            sys.exit(1)

        print (getattr(config, key))
        sys.exit(0)
    else:
        sys.stderr.write("Unknown command %r\n" % (action,))
        sys.exit(1)
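The __main__ block above doubles as a tiny command-line reader for config values. Run from a checkout root, an invocation might look like this (the file path is an illustrative placeholder):

    # python synapse/config/__init__.py read server_name -c homeserver.yaml
    #   -> prints the server_name from the loaded config; exits 1 on error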
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,67 +14,28 @@
# limitations under the License.

import argparse
import errno
import sys
import os
import yaml
from textwrap import dedent


class ConfigError(Exception):
    pass


# We split these messages out to allow packages to override with package
# specific instructions.
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
Please opt in or out of reporting anonymized homeserver usage statistics, by
setting the `report_stats` key in your config file to either True or False.
"""

MISSING_REPORT_STATS_SPIEL = """\
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.

Thank you.
"""

MISSING_SERVER_NAME = """\
Missing mandatory `server_name` config option.
"""


class Config(object):
    def __init__(self, args):
        pass

    @staticmethod
    def parse_size(value):
        if isinstance(value, int) or isinstance(value, long):
            return value
    def parse_size(string):
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
        suffix = value[-1]
        suffix = string[-1]
        if suffix in sizes:
            value = value[:-1]
            string = string[:-1]
            size = sizes[suffix]
        return int(value) * size

    @staticmethod
    def parse_duration(value):
        if isinstance(value, int) or isinstance(value, long):
            return value
        second = 1000
        hour = 60 * 60 * second
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
            value = value[:-1]
            size = sizes[suffix]
        return int(value) * size
        return int(string) * size

    @staticmethod
    def abspath(file_path):
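A quick usage sketch for the two parsers above, assuming the newer value-based signatures on the removed side of this hunk:

    # parse_size: bare K/M suffixes multiply by 1024 / 1024**2.
    Config.parse_size("10K")     # -> 10240
    Config.parse_size(2048)      # -> 2048 (ints pass straight through)

    # parse_duration: returns milliseconds; s/h/d/w/y suffixes supported.
    Config.parse_duration("2h")  # -> 7200000 (2 * 60 * 60 * 1000)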
@@ -102,11 +63,8 @@ class Config(object):
    @classmethod
    def ensure_directory(cls, dir_path):
        dir_path = cls.abspath(dir_path)
        try:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        if not os.path.isdir(dir_path):
            raise ConfigError(
                "%s is not a directory" % (dir_path,)
@@ -128,272 +86,83 @@ class Config(object):
        with open(file_path) as file_stream:
            return yaml.load(file_stream)

    def invoke_all(self, name, *args, **kargs):
        results = []
        for cls in type(self).mro():
            if name in cls.__dict__:
                results.append(getattr(cls, name)(self, *args, **kargs))
        return results

    def generate_config(
        self,
        config_dir_path,
        server_name,
        is_generating_file,
        report_stats=None,
    ):
        default_config = "# vim:ft=yaml\n"

        default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
            "default_config",
            config_dir_path=config_dir_path,
            server_name=server_name,
            is_generating_file=is_generating_file,
            report_stats=report_stats,
        ))

        config = yaml.load(default_config)

        return default_config, config
    @classmethod
    def add_arguments(cls, parser):
        pass

    @classmethod
    def load_config(cls, description, argv):
        config_parser = argparse.ArgumentParser(
            description=description,
        )
        config_parser.add_argument(
            "-c", "--config-path",
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
                 " may specify directories containing *.yaml files."
        )

        config_parser.add_argument(
            "--keys-directory",
            metavar="DIRECTORY",
            help="Where files such as certs and signing keys are stored when"
                 " their location is given explicitly in the config."
                 " Defaults to the directory containing the last config file",
        )

        config_args = config_parser.parse_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        obj = cls()
        obj.read_config_files(
            config_files,
            keys_directory=config_args.keys_directory,
            generate_keys=False,
        )
        return obj
    def generate_config(cls, args, config_dir_path):
        pass

    @classmethod
    def load_or_generate_config(cls, description, argv):
    def load_config(cls, description, argv, generate_section=None):
        config_parser = argparse.ArgumentParser(add_help=False)
        config_parser.add_argument(
            "-c", "--config-path",
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
                 " may specify directories containing *.yaml files."
            help="Specify config file"
        )
        config_parser.add_argument(
            "--generate-config",
            action="store_true",
            help="Generate a config file for the server name"
        )
        config_parser.add_argument(
            "--report-stats",
            action="store",
            help="Whether the generated config reports anonymized usage statistics",
            choices=["yes", "no"]
        )
        config_parser.add_argument(
            "--generate-keys",
            action="store_true",
            help="Generate any missing key files then exit"
        )
        config_parser.add_argument(
            "--keys-directory",
            metavar="DIRECTORY",
            help="Used with 'generate-*' options to specify where files such as"
                 " certs and signing keys should be stored in, unless explicitly"
                 " specified in the config."
        )
        config_parser.add_argument(
            "-H", "--server-name",
            help="The server name to generate a config file for"
            help="Generate config file"
        )
        config_args, remaining_args = config_parser.parse_known_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        generate_keys = config_args.generate_keys

        obj = cls()

        if config_args.generate_config:
            if config_args.report_stats is None:
            if not config_args.config_path:
                config_parser.error(
                    "Please specify either --report-stats=yes or --report-stats=no\n\n" +
                    MISSING_REPORT_STATS_SPIEL
                    "Must specify where to generate the config file"
                )
            if not config_files:
                config_parser.error(
                    "Must supply a config file.\nA config file can be automatically"
                    " generated using \"--generate-config -H SERVER_NAME"
                    " -c CONFIG-FILE\""
                )
            (config_path,) = config_files
            if not os.path.exists(config_path):
                if config_args.keys_directory:
                    config_dir_path = config_args.keys_directory
                else:
                    config_dir_path = os.path.dirname(config_path)
                config_dir_path = os.path.abspath(config_dir_path)

                server_name = config_args.server_name
                if not server_name:
                    raise ConfigError(
                        "Must specify a server_name to a generate config for."
                        " Pass -H server.name."
                    )
                if not os.path.exists(config_dir_path):
                    os.makedirs(config_dir_path)
                with open(config_path, "wb") as config_file:
                    config_bytes, config = obj.generate_config(
                        config_dir_path=config_dir_path,
                        server_name=server_name,
                        report_stats=(config_args.report_stats == "yes"),
                        is_generating_file=True
                    )
                    obj.invoke_all("generate_files", config)
                    config_file.write(config_bytes)
                print (
                    "A config file has been generated in %r for server name"
                    " %r with corresponding SSL keys and self-signed"
                    " certificates. Please review this file and customise it"
                    " to your needs."
                ) % (config_path, server_name)
                print (
                    "If this server name is incorrect, you will need to"
                    " regenerate the SSL certificates"
                )
                return
            config_dir_path = os.path.dirname(config_args.config_path)
            if os.path.exists(config_args.config_path):
                defaults = cls.read_config_file(config_args.config_path)
            else:
                print (
                    "Config file %r already exists. Generating any missing key"
                    " files."
                ) % (config_path,)
                generate_keys = True
                defaults = {}
        else:
            if config_args.config_path:
                defaults = cls.read_config_file(config_args.config_path)
            else:
                defaults = {}

        parser = argparse.ArgumentParser(
            parents=[config_parser],
            description=description,
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        cls.add_arguments(parser)
        parser.set_defaults(**defaults)

        obj.invoke_all("add_arguments", parser)
        args = parser.parse_args(remaining_args)

        if not config_files:
            config_parser.error(
                "Must supply a config file.\nA config file can be automatically"
                " generated using \"--generate-config -H SERVER_NAME"
                " -c CONFIG-FILE\""
        if config_args.generate_config:
            config_dir_path = os.path.dirname(config_args.config_path)
            config_dir_path = os.path.abspath(config_dir_path)
            if not os.path.exists(config_dir_path):
                os.makedirs(config_dir_path)
            cls.generate_config(args, config_dir_path)
            config = {}
            for key, value in vars(args).items():
                if (key not in set(["config_path", "generate_config"])
                        and value is not None):
                    config[key] = value
            with open(config_args.config_path, "w") as config_file:
                # TODO(paul) it would be lovely if we wrote out vim- and emacs-
                # style mode markers into the file, to hint to people that
                # this is a YAML file.
                yaml.dump(config, config_file, default_flow_style=False)
            print (
                "A config file has been generated in %s for server name"
                " '%s' with corresponding SSL keys and self-signed"
                " certificates. Please review this file and customise it to"
                " your needs."
            ) % (
                config_args.config_path, config['server_name']
            )

        obj.read_config_files(
            config_files,
            keys_directory=config_args.keys_directory,
            generate_keys=generate_keys,
        )

        if generate_keys:
            return None

        obj.invoke_all("read_arguments", args)

        return obj

    def read_config_files(self, config_files, keys_directory=None,
                          generate_keys=False):
        if not keys_directory:
            keys_directory = os.path.dirname(config_files[-1])

        config_dir_path = os.path.abspath(keys_directory)

        specified_config = {}
        for config_file in config_files:
            yaml_config = self.read_config_file(config_file)
            specified_config.update(yaml_config)

        if "server_name" not in specified_config:
            raise ConfigError(MISSING_SERVER_NAME)

        server_name = specified_config["server_name"]
        _, config = self.generate_config(
            config_dir_path=config_dir_path,
            server_name=server_name,
            is_generating_file=False,
        )
        config.pop("log_config")
        config.update(specified_config)

        if "report_stats" not in config:
            raise ConfigError(
                MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
                MISSING_REPORT_STATS_SPIEL
            print (
                "If this server name is incorrect, you will need to regenerate"
                " the SSL certificates"
            )
            sys.exit(0)

        if generate_keys:
            self.invoke_all("generate_files", config)
            return

        self.invoke_all("read_config", config)


def find_config_files(search_paths):
    """Finds config files using a list of search paths. If a path is a file
    then that file path is added to the list. If a search path is a directory
    then all the "*.yaml" files in that directory are added to the list in
    sorted order.

    Args:
        search_paths(list(str)): A list of paths to search.

    Returns:
        list(str): A list of file paths.
    """

    config_files = []
    if search_paths:
        for config_path in search_paths:
            if os.path.isdir(config_path):
                # We accept specifying directories as config paths, we search
                # inside that directory for all files matching *.yaml, and then
                # we apply them in *sorted* order.
                files = []
                for entry in os.listdir(config_path):
                    entry_path = os.path.join(config_path, entry)
                    if not os.path.isfile(entry_path):
                        print (
                            "Found subdirectory in config directory: %r. IGNORING."
                        ) % (entry_path, )
                        continue

                    if not entry.endswith(".yaml"):
                        print (
                            "Found file in config directory that does not"
                            " end in '.yaml': %r. IGNORING."
                        ) % (entry_path, )
                        continue

                    files.append(entry_path)

                config_files.extend(sorted(files))
            else:
                config_files.append(config_path)
    return config_files
        return cls(args)
@@ -1,40 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.api.constants import EventTypes


class ApiConfig(Config):

    def read_config(self, config):
        self.room_invite_state_types = config.get("room_invite_state_types", [
            EventTypes.JoinRules,
            EventTypes.CanonicalAlias,
            EventTypes.RoomAvatar,
            EventTypes.Name,
        ])

    def default_config(cls, **kwargs):
        return """\
        ## API Configuration ##

        # A list of event types that will be included in the room_invite_state
        room_invite_state_types:
            - "{JoinRules}"
            - "{CanonicalAlias}"
            - "{RoomAvatar}"
            - "{Name}"
        """.format(**vars(EventTypes))
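For reference, the four EventTypes constants in ApiConfig's default expand to these standard Matrix event type strings:

    # room_invite_state_types default, fully expanded:
    # ["m.room.join_rules", "m.room.canonical_alias",
    #  "m.room.avatar", "m.room.name"]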
@@ -1,132 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError

from synapse.appservice import ApplicationService
from synapse.types import UserID

import urllib
import yaml
import logging

logger = logging.getLogger(__name__)


class AppServiceConfig(Config):

    def read_config(self, config):
        self.app_service_config_files = config.get("app_service_config_files", [])

    def default_config(cls, **kwargs):
        return """\
        # A list of application service config file to use
        app_service_config_files: []
        """


def load_appservices(hostname, config_files):
    """Returns a list of Application Services from the config files."""
    if not isinstance(config_files, list):
        logger.warning(
            "Expected %s to be a list of AS config files.", config_files
        )
        return []

    # Dicts of value -> filename
    seen_as_tokens = {}
    seen_ids = {}

    appservices = []

    for config_file in config_files:
        try:
            with open(config_file, 'r') as f:
                appservice = _load_appservice(
                    hostname, yaml.load(f), config_file
                )
                if appservice.id in seen_ids:
                    raise ConfigError(
                        "Cannot reuse ID across application services: "
                        "%s (files: %s, %s)" % (
                            appservice.id, config_file, seen_ids[appservice.id],
                        )
                    )
                seen_ids[appservice.id] = config_file
                if appservice.token in seen_as_tokens:
                    raise ConfigError(
                        "Cannot reuse as_token across application services: "
                        "%s (files: %s, %s)" % (
                            appservice.token,
                            config_file,
                            seen_as_tokens[appservice.token],
                        )
                    )
                seen_as_tokens[appservice.token] = config_file
                logger.info("Loaded application service: %s", appservice)
                appservices.append(appservice)
        except Exception as e:
            logger.error("Failed to load appservice from '%s'", config_file)
            logger.exception(e)
            raise
    return appservices


def _load_appservice(hostname, as_info, config_filename):
    required_string_fields = [
        "id", "url", "as_token", "hs_token", "sender_localpart"
    ]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), basestring):
            raise KeyError("Required string field: '%s' (%s)" % (
                field, config_filename,
            ))

    localpart = as_info["sender_localpart"]
    if urllib.quote(localpart) != localpart:
        raise ValueError(
            "sender_localpart needs characters which are not URL encoded."
        )
    user = UserID(localpart, hostname)
    user_id = user.to_string()

    # namespace checks
    if not isinstance(as_info.get("namespaces"), dict):
        raise KeyError("Requires 'namespaces' object.")
    for ns in ApplicationService.NS_LIST:
        # specific namespaces are optional
        if ns in as_info["namespaces"]:
            # expect a list of dicts with exclusive and regex keys
            for regex_obj in as_info["namespaces"][ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError(
                        "Expected namespace entry in %s to be an object,"
                        " but got %s", ns, regex_obj
                    )
                if not isinstance(regex_obj.get("regex"), basestring):
                    raise ValueError(
                        "Missing/bad type 'regex' key in %s", regex_obj
                    )
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Missing/bad type 'exclusive' key in %s", regex_obj
                    )
    return ApplicationService(
        token=as_info["as_token"],
        url=as_info["url"],
        namespaces=as_info["namespaces"],
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
    )
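An illustrative as_info mapping that would pass every check in _load_appservice() above — all values are placeholders:

    as_info = {
        "id": "irc-bridge",                  # placeholder
        "url": "http://localhost:9999",      # placeholder
        "as_token": "SOME_AS_TOKEN",
        "hs_token": "SOME_HS_TOKEN",
        "sender_localpart": "irc_bot",       # must survive urllib.quote() unchanged
        "namespaces": {
            # each namespace is optional; entries need a regex string
            # and an exclusive bool
            "users": [{"regex": "@irc_.*", "exclusive": True}],
        },
    }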
@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,32 +17,35 @@ from ._base import Config

class CaptchaConfig(Config):

    def read_config(self, config):
        self.recaptcha_private_key = config["recaptcha_private_key"]
        self.recaptcha_public_key = config["recaptcha_public_key"]
        self.enable_registration_captcha = config["enable_registration_captcha"]
        self.captcha_bypass_secret = config.get("captcha_bypass_secret")
        self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
    def __init__(self, args):
        super(CaptchaConfig, self).__init__(args)
        self.recaptcha_private_key = args.recaptcha_private_key
        self.enable_registration_captcha = args.enable_registration_captcha
        self.captcha_ip_origin_is_x_forwarded = (
            args.captcha_ip_origin_is_x_forwarded
        )
        self.captcha_bypass_secret = args.captcha_bypass_secret

    def default_config(self, **kwargs):
        return """\
        ## Captcha ##
        # See docs/CAPTCHA_SETUP for full details of configuring this.

        # This Home Server's ReCAPTCHA public key.
        recaptcha_public_key: "YOUR_PUBLIC_KEY"

        # This Home Server's ReCAPTCHA private key.
        recaptcha_private_key: "YOUR_PRIVATE_KEY"

        # Enables ReCaptcha checks when registering, preventing signup
        # unless a captcha is answered. Requires a valid ReCaptcha
        # public/private key.
        enable_registration_captcha: False

        # A secret key used to bypass the captcha test entirely.
        #captcha_bypass_secret: "YOUR_SECRET_HERE"

        # The API endpoint to use for verifying m.login.recaptcha responses.
        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
        """
    @classmethod
    def add_arguments(cls, parser):
        super(CaptchaConfig, cls).add_arguments(parser)
        group = parser.add_argument_group("recaptcha")
        group.add_argument(
            "--recaptcha-private-key", type=str, default="YOUR_PRIVATE_KEY",
            help="The matching private key for the web client's public key."
        )
        group.add_argument(
            "--enable-registration-captcha", type=bool, default=False,
            help="Enables ReCaptcha checks when registering, preventing signup"
            + " unless a captcha is answered. Requires a valid ReCaptcha "
            + "public/private key."
        )
        group.add_argument(
            "--captcha_ip_origin_is_x_forwarded", type=bool, default=False,
            help="When checking captchas, use the X-Forwarded-For (XFF) header"
            + " as the client IP and not the actual client IP."
        )
        group.add_argument(
            "--captcha_bypass_secret", type=str,
            help="A secret key used to bypass the captcha test entirely."
        )
@@ -1,47 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class CasConfig(Config):
    """Cas Configuration

    cas_server_url: URL of CAS server
    """

    def read_config(self, config):
        cas_config = config.get("cas_config", None)
        if cas_config:
            self.cas_enabled = cas_config.get("enabled", True)
            self.cas_server_url = cas_config["server_url"]
            self.cas_service_url = cas_config["service_url"]
            self.cas_required_attributes = cas_config.get("required_attributes", {})
        else:
            self.cas_enabled = False
            self.cas_server_url = None
            self.cas_service_url = None
            self.cas_required_attributes = {}

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable CAS for registration and login.
        #cas_config:
        #   enabled: true
        #   server_url: "https://cas-server.com"
        #   service_url: "https://homesever.domain.com:8448"
        #   #required_attributes:
        #   #    name: value
        """
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,66 +14,32 @@
# limitations under the License.

from ._base import Config
import os


class DatabaseConfig(Config):

    def read_config(self, config):
        self.event_cache_size = self.parse_size(
            config.get("event_cache_size", "10K")
        )

        self.database_config = config.get("database")

        if self.database_config is None:
            self.database_config = {
                "name": "sqlite3",
                "args": {},
            }

        name = self.database_config.get("name", None)
        if name == "psycopg2":
            pass
        elif name == "sqlite3":
            self.database_config.setdefault("args", {}).update({
                "cp_min": 1,
                "cp_max": 1,
                "check_same_thread": False,
            })
    def __init__(self, args):
        super(DatabaseConfig, self).__init__(args)
        if args.database_path == ":memory:":
            self.database_path = ":memory:"
        else:
            raise RuntimeError("Unsupported database type '%s'" % (name,))
            self.database_path = self.abspath(args.database_path)
        self.event_cache_size = self.parse_size(args.event_cache_size)

        self.set_databasepath(config.get("database_path"))

    def default_config(self, **kwargs):
        database_path = self.abspath("homeserver.db")
        return """\
        # Database configuration
        database:
            # The database engine name
            name: "sqlite3"
            # Arguments to pass to the engine
            args:
                # Path to the database
                database: "%(database_path)s"

        # Number of events to cache in memory.
        event_cache_size: "10K"
        """ % locals()

    def read_arguments(self, args):
        self.set_databasepath(args.database_path)

    def set_databasepath(self, database_path):
        if database_path != ":memory:":
            database_path = self.abspath(database_path)
        if self.database_config.get("name", None) == "sqlite3":
            if database_path is not None:
                self.database_config["args"]["database"] = database_path

    def add_arguments(self, parser):
    @classmethod
    def add_arguments(cls, parser):
        super(DatabaseConfig, cls).add_arguments(parser)
        db_group = parser.add_argument_group("database")
        db_group.add_argument(
            "-d", "--database-path", metavar="SQLITE_DATABASE_PATH",
            help="The path to a sqlite database to use."
            "-d", "--database-path", default="homeserver.db",
            help="The database name."
        )
        db_group.add_argument(
            "--event-cache-size", default="100K",
            help="Number of events to cache in memory."
        )

    @classmethod
    def generate_config(cls, args, config_dir_path):
        super(DatabaseConfig, cls).generate_config(args, config_dir_path)
        args.database_path = os.path.abspath(args.database_path)
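Sketched for an sqlite3 config, the effect of the newer set_databasepath() above (the absolute path shown is illustrative):

    # set_databasepath("homeserver.db")  -> args["database"] = "/abs/cwd/homeserver.db"
    # set_databasepath(":memory:")       -> args["database"] = ":memory:"
    # set_databasepath(None)             -> args left untouched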
synapse/config/email.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class EmailConfig(Config):

    def __init__(self, args):
        super(EmailConfig, self).__init__(args)
        self.email_from_address = args.email_from_address
        self.email_smtp_server = args.email_smtp_server

    @classmethod
    def add_arguments(cls, parser):
        super(EmailConfig, cls).add_arguments(parser)
        email_group = parser.add_argument_group("email")
        email_group.add_argument(
            "--email-from-address",
            default="FROM@EXAMPLE.COM",
            help="The address to send emails from (e.g. for password resets)."
        )
        email_group.add_argument(
            "--email-smtp-server",
            default="",
            help=(
                "The SMTP server to send emails from (e.g. for password"
                " resets)."
            )
        )
Some files were not shown because too many files have changed in this diff.